diff --git a/Gopkg.lock b/Gopkg.lock index c4e90288..8bbf00c8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -13,6 +13,14 @@ pruneopts = "UT" revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e" +[[projects]] + branch = "master" + digest = "1:6716c9fe6333591128e72848f246fc01dc72240e1e64185d8b4e124e7280b35d" + name = "github.com/AndreasBriese/bbloom" + packages = ["."] + pruneopts = "UT" + revision = "e2d15f34fcf99d5dbb871c820ec73f710fca9815" + [[projects]] branch = "master" digest = "1:482fe066e308f0058abdfa302b9b5ff0fa4e89f6c55f103a2ac5e0af329f72cd" @@ -120,6 +128,37 @@ revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e" version = "v1.7.1" +[[projects]] + digest = "1:5f5090f05382959db941fa45acbeb7f4c5241aa8ac0f8f4393dec696e5953f53" + name = "github.com/dgraph-io/badger" + packages = [ + ".", + "options", + "protos", + "skl", + "table", + "y", + ] + pruneopts = "UT" + revision = "99233d725dbdd26d156c61b2f42ae1671b794656" + version = "gx/v1.5.4" + +[[projects]] + branch = "master" + digest = "1:6e8109ce247a59ab1eeb5330166c12735f6590de99c9647b6162d11518d32c9a" + name = "github.com/dgryski/go-farm" + packages = ["."] + pruneopts = "UT" + revision = "6a90982ecee230ff6cba02d5bd386acc030be9d3" + +[[projects]] + digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74" + name = "github.com/dustin/go-humanize" + packages = ["."] + pruneopts = "UT" + revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e" + version = "v1.0.0" + [[projects]] digest = "1:edb569dd02419a41ddd98768cc0e7aec922ef19dae139731e5ca750afcf6f4c5" name = "github.com/edsrzf/mmap-go" @@ -129,8 +168,8 @@ version = "v1.0.0" [[projects]] - branch = "rpc_statediffs_at_head" - digest = "1:02b56bb807b0b602f7d64b786c3ad5277f0ee2dc841738904b0bd14576f4d9ed" + branch = "rpc_statediffing" + digest = "1:134065ee8e48c2543ac07aa4db259518cd125725c176ebf8262c3abc27daa227" name = "github.com/ethereum/go-ethereum" packages = [ ".", @@ -192,7 +231,7 @@ "trie", ] pruneopts = "T" - revision = "edf001e1d2296951e7e592c55e66ce074bd62807" + revision = "3018a1b5a4e2e4153874087fa4d8f3597ee4a17c" source = "github.com/vulcanize/go-ethereum" [[projects]] @@ -469,6 +508,30 @@ revision = "aa9190c18f1576be98e974359fd08c64ca0b5a94" version = "v0.0.5" +[[projects]] + digest = "1:8270de0224f4c8ef01e23463a6c6f2a5026a2d3ccf3f2e3145ffcd67d7b9a62c" + name = "github.com/ipfs/go-ds-badger" + packages = ["."] + pruneopts = "UT" + revision = "7fe0af0808f565d460fa8d3851a5808d77f72628" + version = "v0.0.3" + +[[projects]] + digest = "1:f896dc92ae70c70f57ac8d47c0aa3e9fe185afcc35ee807975a621766ee6028f" + name = "github.com/ipfs/go-ds-flatfs" + packages = ["."] + pruneopts = "UT" + revision = "d5e3c1fa14d2fcc187a4a996eea3f48de9d7a5cd" + version = "v0.0.2" + +[[projects]] + digest = "1:ab70bd10c780d127a66393a14061ae69ae0145027e7207b7c43db68524f3f64a" + name = "github.com/ipfs/go-ds-leveldb" + packages = ["."] + pruneopts = "UT" + revision = "47a9627082eeb3e52570a75eb4fdfaff8b2f19a9" + version = "v0.0.2" + [[projects]] digest = "1:afbc88b3730097cd76ea72695941270547a5b3ed00d870ee0612897ac9943d79" name = "github.com/ipfs/go-ds-measure" @@ -486,11 +549,12 @@ version = "v0.0.1" [[projects]] - digest = "1:15f5e953da8605b4edc4e2d5bae64680fe31a8c8da066bcfea4fb87112c8187c" + digest = "1:7b3d464292c42bb5d6e816688aaeb29195ce99df4fbd207e44db63fc38af859d" name = "github.com/ipfs/go-ipfs" packages = [ ".", "core", + "core/coredag", "dagutils", "exchange/reprovide", "filestore", @@ -502,6 +566,12 @@ "p2p", "pin", "pin/internal/pb", + "plugin", + "plugin/loader", + 
"plugin/plugins/badgerds", + "plugin/plugins/flatfs", + "plugin/plugins/git", + "plugin/plugins/levelds", "provider", "repo", "repo/common", @@ -760,6 +830,14 @@ revision = "e6e9ea4d16a85d09cafb4dace15b978e984fa672" version = "v0.0.1" +[[projects]] + digest = "1:fe257dab08c7455ab2afb1836d9933f09719d53e16c61df9a2e23316798ccfc2" + name = "github.com/ipfs/go-ipld-git" + packages = ["."] + pruneopts = "UT" + revision = "ee620e932c0cf00124e7c1c6f434eb89530caaf8" + version = "v0.0.2" + [[projects]] digest = "1:4638b57014e4a204350087e3a2d5631f8aaa197bb6af688ca6e280457a7a46fa" name = "github.com/ipfs/go-ipns" @@ -1531,6 +1609,28 @@ revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8" version = "v1.0.0" +[[projects]] + branch = "master" + digest = "1:ae08d850ba158ea3ba4a7bb90f8372608172d8920644e5a6693b940a1f4e5d01" + name = "github.com/mmcloughlin/avo" + packages = [ + "attr", + "build", + "buildtags", + "gotypes", + "internal/prnt", + "internal/stack", + "ir", + "operand", + "pass", + "printer", + "reg", + "src", + "x86", + ] + pruneopts = "UT" + revision = "83fbad1a6b3cba8ac7711170e57953fd12cdc40a" + [[projects]] digest = "1:cf5b7fbff2c87cff6c0e11f87b30edc21abc6592e6a76f41003ca6d5a712cf48" name = "github.com/mr-tron/base58" @@ -1861,7 +1961,7 @@ "pkg/wrappers/rlp", ] pruneopts = "UT" - revision = "b24f61a2b476a6ca31d1b182ca6c4838534b96ab" + revision = "97be848bcc7036b354e7d7b6f10a7a3ac6eed1b1" [[projects]] branch = "master" @@ -2038,7 +2138,7 @@ [[projects]] branch = "master" - digest = "1:e3fb02bc270f8fc06628d2a1dc6811d3753ccaef05ad060c9f6e7c2340ca0e1f" + digest = "1:3d7db3c6e27f9667f5f7c187e18972af1a0e1c6476e0d82c78c78bad398a1442" name = "golang.org/x/net" packages = [ "bpf", @@ -2050,8 +2150,10 @@ ] "internal/iana", "internal/socket", + "internal/timeseries", "ipv4", "ipv6", + "trace", "websocket", ] pruneopts = "UT" @@ -2106,6 +2208,24 @@ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" +[[projects]] + branch = "master" + digest = "1:f5ce0b59aeb99ebb725de7b7a35483600e5f119b4daf081e88329322a2de95bc" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/packages", + "go/types/typeutil", + "internal/fastwalk", + "internal/gopathwalk", + "internal/semver", + ] + pruneopts = "UT" + revision = "bffc5affc6df36a7c1fee87811e47b69912e721f" + [[projects]] branch = "master" digest = "1:d9d3a231c70b17470fc134900032da48d5c146f0f4a2ce12ceb92704dae8d67d" @@ -2175,6 +2295,8 @@ "github.com/ethereum/go-ethereum/crypto", "github.com/ethereum/go-ethereum/ethclient", "github.com/ethereum/go-ethereum/ethdb", + "github.com/ethereum/go-ethereum/log", + "github.com/ethereum/go-ethereum/node", "github.com/ethereum/go-ethereum/p2p", "github.com/ethereum/go-ethereum/p2p/discv5", "github.com/ethereum/go-ethereum/rlp", @@ -2186,6 +2308,7 @@ "github.com/ipfs/go-blockservice", "github.com/ipfs/go-cid", "github.com/ipfs/go-ipfs/core", + "github.com/ipfs/go-ipfs/plugin/loader", "github.com/ipfs/go-ipfs/repo/fsrepo", "github.com/jmoiron/sqlx", "github.com/lib/pq", diff --git a/Gopkg.toml b/Gopkg.toml index dda6c81e..bbb400a0 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -62,6 +62,10 @@ name = "github.com/ipfs/go-ipfs" version = "0.4.20" +[[override]] + name = "github.com/ipfs/go-ds-badger" + version = "0.0.3" + [prune] go-tests = true unused-packages = true diff --git a/cmd/syncAndPublish.go b/cmd/syncAndPublish.go index 50ad99c2..b3c41b1e 100644 --- a/cmd/syncAndPublish.go +++ b/cmd/syncAndPublish.go @@ 
-66,7 +66,7 @@ func syncAndPublish() { } wg := &syn.WaitGroup{} - err = processor.Process(wg) + err = processor.SyncAndPublish(wg, nil, nil) if err != nil { log.Fatal(err) } diff --git a/pkg/core/rpc_client.go b/pkg/core/rpc_client.go index 5b69417f..b193cd3c 100644 --- a/pkg/core/rpc_client.go +++ b/pkg/core/rpc_client.go @@ -29,5 +29,5 @@ type RPCClient interface { BatchCall(batch []client.BatchElem) error IpcPath() string SupportedModules() (map[string]string, error) - Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) + Subscribe(namespace string, payloadChan interface{}, subName string, args ...interface{}) (*rpc.ClientSubscription, error) } diff --git a/pkg/fakes/mock_rpc_client.go b/pkg/fakes/mock_rpc_client.go index 190309ea..9ff03b5a 100644 --- a/pkg/fakes/mock_rpc_client.go +++ b/pkg/fakes/mock_rpc_client.go @@ -188,6 +188,6 @@ func (client *MockRPCClient) AssertBatchCalledWith(method string, lengthOfBatch Expect(client.passedMethod).To(Equal(method)) } -func (client *MockRpcClient) Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) { +func (client *MockRpcClient) Subscribe(namespace string, payloadChan interface{}, subName string, args ...interface{}) (*rpc.ClientSubscription, error) { panic("implement me") } diff --git a/pkg/ipfs/api.go b/pkg/ipfs/api.go index 858f8442..15413678 100644 --- a/pkg/ipfs/api.go +++ b/pkg/ipfs/api.go @@ -42,7 +42,7 @@ func NewPublicSeedNodeAPI(snp SyncPublishAndServe) *PublicSeedNodeAPI { } // Subscribe is the public method to setup a subscription that fires off state-diff payloads as they are created -func (api *PublicSeedNodeAPI) Subscribe(ctx context.Context, params *Params) (*rpc.Subscription, error) { +func (api *PublicSeedNodeAPI) Subscribe(ctx context.Context, payloadChan chan ResponsePayload, params *Params) (*rpc.Subscription, error) { // ensure that the RPC connection supports subscriptions notifier, supported := rpc.NotifierFromContext(ctx) if !supported { diff --git a/pkg/ipfs/converter.go b/pkg/ipfs/converter.go index 8fdc5788..fe349ca0 100644 --- a/pkg/ipfs/converter.go +++ b/pkg/ipfs/converter.go @@ -73,7 +73,7 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) { return nil, err } txMeta := &TrxMetaData{ - To: trx.To().Hex(), + To: handleNullAddr(trx.To()), From: from.Hex(), } // txMeta will have same index as its corresponding trx in the convertedPayload.BlockBody @@ -152,3 +152,10 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) { } return convertedPayload, nil } + +func handleNullAddr(to *common.Address) string { + if to == nil { + return "0x0000000000000000000000000000000000000000000000000000000000000000" + } + return to.Hex() +} diff --git a/pkg/ipfs/publisher.go b/pkg/ipfs/publisher.go index b99df6b8..7f9b4d99 100644 --- a/pkg/ipfs/publisher.go +++ b/pkg/ipfs/publisher.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" rlp2 "github.com/ethereum/go-ethereum/rlp" + "github.com/ipfs/go-ipfs/plugin/loader" "github.com/vulcanize/eth-block-extractor/pkg/ipfs" "github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header" @@ -49,6 +50,18 @@ type Publisher struct { // NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface func NewIPLDPublisher(ipfsPath string) (*Publisher, error) { + l, err := loader.NewPluginLoader("~/.ipfs/plugins") + if err != nil { + 
return nil, err + } + err = l.Initialize() + if err != nil { + return nil, err + } + err = l.Inject() + if err != nil { + return nil, err + } node, err := ipfs.InitIPFSNode(ipfsPath) if err != nil { return nil, err @@ -98,13 +111,13 @@ func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) { } // Process and publish state leafs - stateLeafCids, err := pub.publishStateNodes(payload.StateNodes) + stateNodeCids, err := pub.publishStateNodes(payload.StateNodes) if err != nil { return nil, err } // Process and publish storage leafs - storageLeafCids, err := pub.publishStorageNodes(payload.StorageNodes) + storageNodeCids, err := pub.publishStorageNodes(payload.StorageNodes) if err != nil { return nil, err } @@ -117,8 +130,8 @@ func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) { UncleCIDS: uncleCids, TransactionCIDs: transactionCids, ReceiptCIDs: receiptsCids, - StateNodeCIDs: stateLeafCids, - StorageNodeCIDs: storageLeafCids, + StateNodeCIDs: stateNodeCids, + StorageNodeCIDs: storageNodeCids, }, nil } diff --git a/pkg/ipfs/repository.go b/pkg/ipfs/repository.go index d1f0bdb8..1729ffa2 100644 --- a/pkg/ipfs/repository.go +++ b/pkg/ipfs/repository.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . +// Still seeing some errors from tx and storage indexing processes... due to fk constraints being broken package ipfs import ( @@ -50,24 +51,27 @@ func (repo *Repository) Index(cidPayload *CIDPayload) error { } for uncleHash, cid := range cidPayload.UncleCIDS { err = repo.indexUncleCID(tx, cid, cidPayload.BlockNumber, uncleHash.Hex()) + if err != nil { + tx.Rollback() + return err + } } - err = repo.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID) + tx.Commit() + err = repo.indexTransactionAndReceiptCIDs(cidPayload, headerID) if err != nil { - tx.Rollback() return err } - err = repo.indexStateAndStorageCIDs(tx, cidPayload, headerID) + err = repo.indexStateAndStorageCIDs(cidPayload, headerID) if err != nil { - tx.Rollback() return err } - return tx.Commit() + return nil } func (repo *Repository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash string) (int64, error) { var headerID int64 err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle) VALUES ($1, $2, $3, $4) - ON CONFLICT DO UPDATE SET (cid, uncle) = ($3, $4) + ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle) = ($3, $4) RETURNING id`, blockNumber, hash, cid, false).Scan(&headerID) return headerID, err @@ -75,61 +79,67 @@ func (repo *Repository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash strin func (repo *Repository) indexUncleCID(tx *sqlx.Tx, cid, blockNumber, hash string) error { _, err := tx.Queryx(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle) VALUES ($1, $2, $3, $4) - ON CONFLICT DO UPDATE SET (cid, uncle) = ($3, $4)`, + ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle) = ($3, $4)`, blockNumber, hash, cid, true) return err } -func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error { +func (repo *Repository) indexTransactionAndReceiptCIDs(payload *CIDPayload, headerID int64) error { + tx, _ := repo.db.Beginx() for hash, trxCidMeta := range payload.TransactionCIDs { var txID int64 err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT DO UPDATE SET (cid, dst, src) = ($3, $4, 
$5) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5) RETURNING id`, headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.To, trxCidMeta.From).Scan(&txID) if err != nil { + tx.Rollback() return err } receiptCidMeta, ok := payload.ReceiptCIDs[hash] if ok { err = repo.indexReceiptCID(tx, receiptCidMeta, txID) if err != nil { + tx.Rollback() return err } } } - return nil + return tx.Commit() } func (repo *Repository) indexReceiptCID(tx *sqlx.Tx, cidMeta *ReceiptMetaData, txID int64) error { - _, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, topic0s) VALUES ($1, $2, $3) - ON CONFLICT DO UPDATE SET (cid, topic0s) = ($2, $3)`, txID, cidMeta.CID, pq.Array(cidMeta.Topic0s)) + _, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, topic0s) VALUES ($1, $2, $3)`, + txID, cidMeta.CID, pq.Array(cidMeta.Topic0s)) return err } -func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error { +func (repo *Repository) indexStateAndStorageCIDs(payload *CIDPayload, headerID int64) error { + tx, _ := repo.db.Beginx() for accountKey, stateCID := range payload.StateNodeCIDs { var stateID int64 err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4) - ON CONFLICT DO UPDATE SET (cid, leaf) = ($3, $4) + ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4) RETURNING id`, headerID, accountKey.Hex(), stateCID.CID, stateCID.Leaf).Scan(&stateID) if err != nil { + tx.Rollback() return err } for _, storageCID := range payload.StorageNodeCIDs[accountKey] { err = repo.indexStorageCID(tx, storageCID, stateID) if err != nil { + tx.Rollback() return err } } } - return nil + return tx.Commit() } func (repo *Repository) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeCID, stateID int64) error { _, err := repo.db.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4) - ON CONFLICT DO UPDATE SET (cid, leaf) = ($3, $4)`, - stateID, storageCID.Key, storageCID.CID, storageCID.Leaf) + ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`, + stateID, storageCID.Key.Hex(), storageCID.CID, storageCID.Leaf) return err } diff --git a/pkg/ipfs/streamer.go b/pkg/ipfs/streamer.go index 661c27d1..f3c6c01a 100644 --- a/pkg/ipfs/streamer.go +++ b/pkg/ipfs/streamer.go @@ -30,8 +30,7 @@ type StateDiffStreamer interface { // Streamer is the underlying struct for the StateDiffStreamer interface type Streamer struct { - Client core.RpcClient - PayloadChan chan statediff.Payload + Client core.RpcClient } // NewStateDiffStreamer creates a pointer to a new Streamer which satisfies the StateDiffStreamer interface @@ -43,5 +42,5 @@ func NewStateDiffStreamer(client core.RpcClient) *Streamer { // Stream is the main loop for subscribing to data from the Geth state diff process func (sds *Streamer) Stream(payloadChan chan statediff.Payload) (*rpc.ClientSubscription, error) { - return sds.Client.Subscribe("statediff", sds.PayloadChan) + return sds.Client.Subscribe("statediff", payloadChan, "subscribe") } diff --git a/vendor/github.com/AndreasBriese/bbloom/.travis.yml b/vendor/github.com/AndreasBriese/bbloom/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/AndreasBriese/bbloom/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/AndreasBriese/bbloom/LICENSE b/vendor/github.com/AndreasBriese/bbloom/LICENSE new file mode 100644 index 00000000..4b20050e --- /dev/null +++ 
b/vendor/github.com/AndreasBriese/bbloom/LICENSE @@ -0,0 +1,35 @@ +bbloom.go + +// The MIT License (MIT) +// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt + +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +siphash.go + +// https://github.com/dchest/siphash +// +// Written in 2012 by Dmitry Chestnykh. +// +// To the extent possible under law, the author have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// http://creativecommons.org/publicdomain/zero/1.0/ +// +// Package siphash implements SipHash-2-4, a fast short-input PRF +// created by Jean-Philippe Aumasson and Daniel J. Bernstein. diff --git a/vendor/github.com/AndreasBriese/bbloom/README.md b/vendor/github.com/AndreasBriese/bbloom/README.md new file mode 100644 index 00000000..d7413c33 --- /dev/null +++ b/vendor/github.com/AndreasBriese/bbloom/README.md @@ -0,0 +1,131 @@ +## bbloom: a bitset Bloom filter for go/golang +=== + +[![Build Status](https://travis-ci.org/AndreasBriese/bbloom.png?branch=master)](http://travis-ci.org/AndreasBriese/bbloom) + +package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. + +NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom + +=== + +changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. + +This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". +Nonetheless bbloom should work with any other form of entries. + +~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~ + +Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash ) + +Minimum hashset size is: 512 ([4]uint64; will be set automatically). 
+ +###install + +```sh +go get github.com/AndreasBriese/bbloom +``` + +###test ++ change to folder ../bbloom ++ create wordlist in file "words.txt" (you might use `python permut.py`) ++ run 'go test -bench=.' within the folder + +```go +go test -bench=. +``` + +~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~ + +using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively) + +### usage + +after installation add + +```go +import ( + ... + "github.com/AndreasBriese/bbloom" + ... + ) +``` + +at your header. In the program use + +```go +// create a bloom filter for 65536 items and 1 % wrong-positive ratio +bf := bbloom.New(float64(1<<16), float64(0.01)) + +// or +// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly +// bf = bbloom.New(float64(650000), float64(7)) +// or +bf = bbloom.New(650000.0, 7.0) + +// add one item +bf.Add([]byte("butter")) + +// Number of elements added is exposed now +// Note: ElemNum will not be included in JSON export (for compatability to older version) +nOfElementsInFilter := bf.ElemNum + +// check if item is in the filter +isIn := bf.Has([]byte("butter")) // should be true +isNotIn := bf.Has([]byte("Butter")) // should be false + +// 'add only if item is new' to the bloomfilter +added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set +added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new + +// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS +// add one item +bf.AddTS([]byte("peanutbutter")) +// check if item is in the filter +isIn = bf.HasTS([]byte("peanutbutter")) // should be true +isNotIn = bf.HasTS([]byte("peanutButter")) // should be false +// 'add only if item is new' to the bloomfilter +added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set +added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new + +// convert to JSON ([]byte) +Json := bf.JSONMarshal() + +// bloomfilters Mutex is exposed for external un-/locking +// i.e. mutex lock while doing JSON conversion +bf.Mtx.Lock() +Json = bf.JSONMarshal() +bf.Mtx.Unlock() + +// restore a bloom filter from storage +bfNew := bbloom.JSONUnmarshal(Json) + +isInNew := bfNew.Has([]byte("butter")) // should be true +isNotInNew := bfNew.Has([]byte("Butter")) // should be false + +``` + +to work with the bloom filter. + +### why 'fast'? + +It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . 
And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: + + + Bloom filter (filter size 524288, 7 hashlocs) + github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op) + github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op) + github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op) + github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op) + + github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op) + github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op) + github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op) + github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op) + github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op) + github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op) + +(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz) + + +With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. diff --git a/vendor/github.com/AndreasBriese/bbloom/bbloom.go b/vendor/github.com/AndreasBriese/bbloom/bbloom.go new file mode 100644 index 00000000..3d457406 --- /dev/null +++ b/vendor/github.com/AndreasBriese/bbloom/bbloom.go @@ -0,0 +1,270 @@ +// The MIT License (MIT) +// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt + +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package bbloom + +import ( + "bytes" + "encoding/json" + "log" + "math" + "sync" + "unsafe" +) + +// helper +var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} + +func getSize(ui64 uint64) (size uint64, exponent uint64) { + if ui64 < uint64(512) { + ui64 = uint64(512) + } + size = uint64(1) + for size < ui64 { + size <<= 1 + exponent++ + } + return size, exponent +} + +func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) { + size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2) + locs := math.Ceil(float64(0.69314718056) * size / numEntries) + return uint64(size), uint64(locs) +} + +// New +// returns a new bloomfilter +func New(params ...float64) (bloomfilter Bloom) { + var entries, locs uint64 + if len(params) == 2 { + if params[1] < 1 { + entries, locs = calcSizeByWrongPositives(params[0], params[1]) + } else { + entries, locs = uint64(params[0]), uint64(params[1]) + } + } else { + log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))") + } + size, exponent := getSize(uint64(entries)) + bloomfilter = Bloom{ + sizeExp: exponent, + size: size - 1, + setLocs: locs, + shift: 64 - exponent, + } + bloomfilter.Size(size) + return bloomfilter +} + +// NewWithBoolset +// takes a []byte slice and number of locs per entry +// returns the bloomfilter with a bitset populated according to the input []byte +func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) { + bloomfilter = New(float64(len(*bs)<<3), float64(locs)) + ptr := uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + for _, b := range *bs { + *(*uint8)(unsafe.Pointer(ptr)) = b + ptr++ + } + return bloomfilter +} + +// bloomJSONImExport +// Im/Export structure used by JSONMarshal / JSONUnmarshal +type bloomJSONImExport struct { + FilterSet []byte + SetLocs uint64 +} + +// JSONUnmarshal +// takes JSON-Object (type bloomJSONImExport) as []bytes +// returns bloom32 / bloom64 object +func JSONUnmarshal(dbData []byte) Bloom { + bloomImEx := bloomJSONImExport{} + json.Unmarshal(dbData, &bloomImEx) + buf := bytes.NewBuffer(bloomImEx.FilterSet) + bs := buf.Bytes() + bf := NewWithBoolset(&bs, bloomImEx.SetLocs) + return bf +} + +// +// Bloom filter +type Bloom struct { + Mtx sync.Mutex + ElemNum uint64 + bitset []uint64 + sizeExp uint64 + size uint64 + setLocs uint64 + shift uint64 +} + +// <--- http://www.cse.yorku.ca/~oz/hash.html +// modified Berkeley DB Hash (32bit) +// hash is casted to l, h = 16bit fragments +// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { +// hash := uint64(len(*b)) +// for _, c := range *b { +// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash +// } +// h = hash >> bl.shift +// l = hash << bl.shift >> bl.shift +// return l, h +// } + +// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. 
Bernstein to be even faster than absdbm() +// https://131002.net/siphash/ +// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash + +// Add +// set the bit(s) for entry; Adds an entry to the Bloom filter +func (bl *Bloom) Add(entry []byte) { + l, h := bl.sipHash(entry) + for i := uint64(0); i < (*bl).setLocs; i++ { + (*bl).Set((h + i*l) & (*bl).size) + (*bl).ElemNum++ + } +} + +// AddTS +// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry +func (bl *Bloom) AddTS(entry []byte) { + bl.Mtx.Lock() + defer bl.Mtx.Unlock() + bl.Add(entry[:]) +} + +// Has +// check if bit(s) for entry is/are set +// returns true if the entry was added to the Bloom Filter +func (bl Bloom) Has(entry []byte) bool { + l, h := bl.sipHash(entry) + for i := uint64(0); i < bl.setLocs; i++ { + switch bl.IsSet((h + i*l) & bl.size) { + case false: + return false + } + } + return true +} + +// HasTS +// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry +func (bl *Bloom) HasTS(entry []byte) bool { + bl.Mtx.Lock() + defer bl.Mtx.Unlock() + return bl.Has(entry[:]) +} + +// AddIfNotHas +// Only Add entry if it's not present in the bloomfilter +// returns true if entry was added +// returns false if entry was allready registered in the bloomfilter +func (bl Bloom) AddIfNotHas(entry []byte) (added bool) { + if bl.Has(entry[:]) { + return added + } + bl.Add(entry[:]) + return true +} + +// AddIfNotHasTS +// Tread safe: Only Add entry if it's not present in the bloomfilter +// returns true if entry was added +// returns false if entry was allready registered in the bloomfilter +func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) { + bl.Mtx.Lock() + defer bl.Mtx.Unlock() + return bl.AddIfNotHas(entry[:]) +} + +// Size +// make Bloom filter with as bitset of size sz +func (bl *Bloom) Size(sz uint64) { + (*bl).bitset = make([]uint64, sz>>6) +} + +// Clear +// resets the Bloom filter +func (bl *Bloom) Clear() { + for i, _ := range (*bl).bitset { + (*bl).bitset[i] = 0 + } +} + +// Set +// set the bit[idx] of bitsit +func (bl *Bloom) Set(idx uint64) { + ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) + *(*uint8)(ptr) |= mask[idx%8] +} + +// IsSet +// check if bit[idx] of bitset is set +// returns true/false +func (bl *Bloom) IsSet(idx uint64) bool { + ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) + r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 + return r == 1 +} + +// JSONMarshal +// returns JSON-object (type bloomJSONImExport) as []byte +func (bl Bloom) JSONMarshal() []byte { + bloomImEx := bloomJSONImExport{} + bloomImEx.SetLocs = uint64(bl.setLocs) + bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3) + ptr := uintptr(unsafe.Pointer(&bl.bitset[0])) + for i := range bloomImEx.FilterSet { + bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(ptr)) + ptr++ + } + data, err := json.Marshal(bloomImEx) + if err != nil { + log.Fatal("json.Marshal failed: ", err) + } + return data +} + +// // alternative hashFn +// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) { +// h64 := fnv.New64a() +// h64.Write(*b) +// hash := h64.Sum64() +// h = hash >> 32 +// l = hash << 32 >> 32 +// return l, h +// } +// +// // <-- http://partow.net/programming/hashfunctions/index.html +// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3, +// // under the topic of sorting and search chapter 6.4. 
+// // modified to fit with boolset-length +// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) { +// hash := uint64(len(*b)) +// for _, c := range *b { +// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c) +// } +// h = hash >> bl.shift +// l = hash << bl.sizeExp >> bl.sizeExp +// return l, h +// } diff --git a/vendor/github.com/AndreasBriese/bbloom/sipHash.go b/vendor/github.com/AndreasBriese/bbloom/sipHash.go new file mode 100644 index 00000000..a91d8199 --- /dev/null +++ b/vendor/github.com/AndreasBriese/bbloom/sipHash.go @@ -0,0 +1,225 @@ +// Written in 2012 by Dmitry Chestnykh. +// +// To the extent possible under law, the author have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// http://creativecommons.org/publicdomain/zero/1.0/ +// +// Package siphash implements SipHash-2-4, a fast short-input PRF +// created by Jean-Philippe Aumasson and Daniel J. Bernstein. + +package bbloom + +// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit +// parts of 128-bit key: k0 and k1. +func (bl Bloom) sipHash(p []byte) (l, h uint64) { + // Initialization. + v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575 + v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d + v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261 + v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573 + t := uint64(len(p)) << 56 + + // Compression. + for len(p) >= 8 { + + m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | + uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 + + v3 ^= m + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + v0 ^= m + p = p[8:] + } + + // Compress last block. + switch len(p) { + case 7: + t |= uint64(p[6]) << 48 + fallthrough + case 6: + t |= uint64(p[5]) << 40 + fallthrough + case 5: + t |= uint64(p[4]) << 32 + fallthrough + case 4: + t |= uint64(p[3]) << 24 + fallthrough + case 3: + t |= uint64(p[2]) << 16 + fallthrough + case 2: + t |= uint64(p[1]) << 8 + fallthrough + case 1: + t |= uint64(p[0]) + } + + v3 ^= t + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + v0 ^= t + + // Finalization. + v2 ^= 0xff + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 2. 
+ v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 3. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 4. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // return v0 ^ v1 ^ v2 ^ v3 + + hash := v0 ^ v1 ^ v2 ^ v3 + h = hash >> bl.shift + l = hash << bl.shift >> bl.shift + return l, h + +} diff --git a/vendor/github.com/AndreasBriese/bbloom/words.txt b/vendor/github.com/AndreasBriese/bbloom/words.txt new file mode 100644 index 00000000..ad86a31a --- /dev/null +++ b/vendor/github.com/AndreasBriese/bbloom/words.txt @@ -0,0 +1,140 @@ +2014/01/01 00:00:00 /info.html +2014/01/01 00:00:00 /info.html +2014/01/01 00:00:01 /info.html +2014/01/01 00:00:02 /info.html +2014/01/01 00:00:03 /info.html +2014/01/01 00:00:04 /info.html +2014/01/01 00:00:05 /info.html +2014/01/01 00:00:06 /info.html +2014/01/01 00:00:07 /info.html +2014/01/01 00:00:08 /info.html +2014/01/01 00:00:09 /info.html +2014/01/01 00:00:10 /info.html +2014/01/01 00:00:11 /info.html +2014/01/01 00:00:12 /info.html +2014/01/01 00:00:13 /info.html +2014/01/01 00:00:14 /info.html +2014/01/01 00:00:15 /info.html +2014/01/01 00:00:16 /info.html +2014/01/01 00:00:17 /info.html +2014/01/01 00:00:18 /info.html +2014/01/01 00:00:19 /info.html +2014/01/01 00:00:20 /info.html +2014/01/01 00:00:21 /info.html +2014/01/01 00:00:22 /info.html +2014/01/01 00:00:23 /info.html +2014/01/01 00:00:24 /info.html +2014/01/01 00:00:25 /info.html +2014/01/01 00:00:26 /info.html +2014/01/01 00:00:27 /info.html +2014/01/01 00:00:28 /info.html +2014/01/01 00:00:29 /info.html +2014/01/01 00:00:30 /info.html +2014/01/01 00:00:31 /info.html +2014/01/01 00:00:32 /info.html +2014/01/01 00:00:33 /info.html +2014/01/01 00:00:34 /info.html +2014/01/01 00:00:35 /info.html +2014/01/01 00:00:36 /info.html +2014/01/01 00:00:37 /info.html +2014/01/01 00:00:38 /info.html +2014/01/01 00:00:39 /info.html +2014/01/01 00:00:40 /info.html +2014/01/01 00:00:41 /info.html +2014/01/01 00:00:42 /info.html +2014/01/01 00:00:43 /info.html +2014/01/01 00:00:44 /info.html +2014/01/01 00:00:45 /info.html +2014/01/01 00:00:46 /info.html +2014/01/01 00:00:47 /info.html +2014/01/01 00:00:48 /info.html +2014/01/01 00:00:49 /info.html +2014/01/01 00:00:50 /info.html +2014/01/01 00:00:51 /info.html +2014/01/01 00:00:52 /info.html +2014/01/01 00:00:53 /info.html +2014/01/01 00:00:54 /info.html +2014/01/01 00:00:55 /info.html +2014/01/01 00:00:56 /info.html +2014/01/01 00:00:57 /info.html +2014/01/01 00:00:58 /info.html +2014/01/01 00:00:59 /info.html +2014/01/01 00:01:00 /info.html +2014/01/01 00:01:01 /info.html +2014/01/01 00:01:02 /info.html +2014/01/01 00:01:03 /info.html +2014/01/01 00:01:04 /info.html +2014/01/01 00:01:05 /info.html +2014/01/01 00:01:06 /info.html +2014/01/01 00:01:07 /info.html +2014/01/01 00:01:08 /info.html +2014/01/01 00:01:09 /info.html +2014/01/01 00:01:10 /info.html +2014/01/01 00:01:11 /info.html +2014/01/01 00:01:12 /info.html +2014/01/01 00:01:13 /info.html +2014/01/01 00:01:14 /info.html 
+2014/01/01 00:01:15 /info.html +2014/01/01 00:01:16 /info.html +2014/01/01 00:01:17 /info.html +2014/01/01 00:01:18 /info.html +2014/01/01 00:01:19 /info.html +2014/01/01 00:01:20 /info.html +2014/01/01 00:01:21 /info.html +2014/01/01 00:01:22 /info.html +2014/01/01 00:01:23 /info.html +2014/01/01 00:01:24 /info.html +2014/01/01 00:01:25 /info.html +2014/01/01 00:01:26 /info.html +2014/01/01 00:01:27 /info.html +2014/01/01 00:01:28 /info.html +2014/01/01 00:01:29 /info.html +2014/01/01 00:01:30 /info.html +2014/01/01 00:01:31 /info.html +2014/01/01 00:01:32 /info.html +2014/01/01 00:01:33 /info.html +2014/01/01 00:01:34 /info.html +2014/01/01 00:01:35 /info.html +2014/01/01 00:01:36 /info.html +2014/01/01 00:01:37 /info.html +2014/01/01 00:01:38 /info.html +2014/01/01 00:01:39 /info.html +2014/01/01 00:01:40 /info.html +2014/01/01 00:01:41 /info.html +2014/01/01 00:01:42 /info.html +2014/01/01 00:01:43 /info.html +2014/01/01 00:01:44 /info.html +2014/01/01 00:01:45 /info.html +2014/01/01 00:01:46 /info.html +2014/01/01 00:01:47 /info.html +2014/01/01 00:01:48 /info.html +2014/01/01 00:01:49 /info.html +2014/01/01 00:01:50 /info.html +2014/01/01 00:01:51 /info.html +2014/01/01 00:01:52 /info.html +2014/01/01 00:01:53 /info.html +2014/01/01 00:01:54 /info.html +2014/01/01 00:01:55 /info.html +2014/01/01 00:01:56 /info.html +2014/01/01 00:01:57 /info.html +2014/01/01 00:01:58 /info.html +2014/01/01 00:01:59 /info.html +2014/01/01 00:02:00 /info.html +2014/01/01 00:02:01 /info.html +2014/01/01 00:02:02 /info.html +2014/01/01 00:02:03 /info.html +2014/01/01 00:02:04 /info.html +2014/01/01 00:02:05 /info.html +2014/01/01 00:02:06 /info.html +2014/01/01 00:02:07 /info.html +2014/01/01 00:02:08 /info.html +2014/01/01 00:02:09 /info.html +2014/01/01 00:02:10 /info.html +2014/01/01 00:02:11 /info.html +2014/01/01 00:02:12 /info.html +2014/01/01 00:02:13 /info.html +2014/01/01 00:02:14 /info.html +2014/01/01 00:02:15 /info.html +2014/01/01 00:02:16 /info.html +2014/01/01 00:02:17 /info.html +2014/01/01 00:02:18 /info.html diff --git a/vendor/github.com/dgraph-io/badger/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/CHANGELOG.md new file mode 100644 index 00000000..550b66e2 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/CHANGELOG.md @@ -0,0 +1,100 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.5.3] - 2018-07-11 +Bug Fixes: +* Fix a panic caused due to item.vptr not copying over vs.Value, when looking + for a move key. + +## [1.5.2] - 2018-06-19 +Bug Fixes: +* Fix the way move key gets generated. +* If a transaction has unclosed, or multiple iterators running simultaneously, + throw a panic. Every iterator must be properly closed. At any point in time, + only one iterator per transaction can be running. This is to avoid bugs in a + transaction data structure which is thread unsafe. + +* *Warning: This change might cause panics in user code. Fix is to properly + close your iterators, and only have one running at a time per transaction.* + +## [1.5.1] - 2018-06-04 +Bug Fixes: +* Fix for infinite yieldItemValue recursion. #503 +* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f +* Use file size based window size for sampling, instead of fixing it to 10MB. 
#501 + +Cleanup: +* Clarify comments and documentation. +* Move badger tool one directory level up. + +## [1.5.0] - 2018-05-08 +* Introduce `NumVersionsToKeep` option. This option is used to discard many + versions of the same key, which saves space. +* Add a new `SetWithDiscard` method, which would indicate that all the older + versions of the key are now invalid. Those versions would be discarded during + compactions. +* Value log GC moves are now bound to another keyspace to ensure latest versions + of data are always at the top in LSM tree. +* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per + value log file. This helps bound the time it takes to garbage collect one + file. + +## [1.4.0] - 2018-05-04 +* Make mmap-ing of value log optional. +* Run GC multiple times, based on recorded discard statistics. +* Add MergeOperator. +* Force compact L0 on clsoe (#439). +* Add truncate option to warn about data loss (#452). +* Discard key versions during compaction (#464). +* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB. + +Bug fix: +* (Temporary) Check max version across all tables in Get (removed in next + release). +* Update commit and read ts while loading from backup. +* Ensure all transaction entries are part of the same value log file. +* On commit, run unlock callbacks before doing writes (#413). +* Wait for goroutines to finish before closing iterators (#421). + +## [1.3.0] - 2017-12-12 +* Add `DB.NextSequence()` method to generate monotonically increasing integer + sequences. +* Add `DB.Size()` method to return the size of LSM and value log files. +* Tweaked mmap code to make Windows 32-bit builds work. +* Tweaked build tags on some files to make iOS builds work. +* Fix `DB.PurgeOlderVersions()` to not violate some constraints. + +## [1.2.0] - 2017-11-30 +* Expose a `Txn.SetEntry()` method to allow setting the key-value pair + and all the metadata at the same time. + +## [1.1.1] - 2017-11-28 +* Fix bug where txn.Get was returing key deleted in same transaction. +* Fix race condition while decrementing reference in oracle. +* Update doneCommit in the callback for CommitAsync. +* Iterator see writes of current txn. + +## [1.1.0] - 2017-11-13 +* Create Badger directory if it does not exist when `badger.Open` is called. 
+* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations +* Fixed 64-bit alignment issues to make Badger run on Arm v7 + +## [1.0.1] - 2017-11-06 +* Fix an uint16 overflow when resizing key slice + +[Unreleased]: https://github.com/dgraph-io/badger/compare/v1.5.3...HEAD +[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3 +[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2 +[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1 +[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0 +[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1 diff --git a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..bf7bbc29 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +Our Code of Conduct can be found here: + +https://dgraph.io/conduct diff --git a/vendor/github.com/dgraph-io/badger/LICENSE b/vendor/github.com/dgraph-io/badger/LICENSE new file mode 100644 index 00000000..d9a10c0d --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/dgraph-io/badger/README.md b/vendor/github.com/dgraph-io/badger/README.md new file mode 100644 index 00000000..4133210f --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/README.md @@ -0,0 +1,773 @@ +# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![Build Status](https://teamcity.dgraph.io/guestAuth/app/rest/builds/buildType:(id:Badger_UnitTests)/statusIcon.svg)](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master) + +![Badger mascot](images/diggy-shadow.png) + +BadgerDB is an embeddable, persistent and fast key-value (KV) database +written in pure Go. It's meant to be a performant alternative to non-Go-based +key-value stores like [RocksDB](https://github.com/facebook/rocksdb). + +## Project Status [Oct 27, 2018] + +Badger is stable and is being used to serve data sets worth hundreds of +terabytes. Badger supports concurrent ACID transactions with serializable +snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for +8h, with `--race` flag and ensures maintainance of transactional guarantees. +Badger has also been tested to work with filesystem level anomalies, to ensure +persistence and consistency. + +Badger v1.0 was released in Nov 2017, with a Badger v2.0 release coming up in a +few months. The [Changelog] is kept fairly up-to-date. 
+ +[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md + +## Table of Contents + * [Getting Started](#getting-started) + + [Installing](#installing) + + [Opening a database](#opening-a-database) + + [Transactions](#transactions) + - [Read-only transactions](#read-only-transactions) + - [Read-write transactions](#read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + + [Using key/value pairs](#using-keyvalue-pairs) + + [Monotonically increasing integers](#monotonically-increasing-integers) + * [Merge Operations](#merge-operations) + + [Setting Time To Live(TTL) and User Metadata on Keys](#setting-time-to-livettl-and-user-metadata-on-keys) + + [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Key-only iteration](#key-only-iteration) + + [Stream](#stream) + + [Garbage Collection](#garbage-collection) + + [Database backup](#database-backup) + + [Memory usage](#memory-usage) + + [Statistics](#statistics) + * [Resources](#resources) + + [Blog Posts](#blog-posts) + * [Contact](#contact) + * [Design](#design) + + [Comparisons](#comparisons) + + [Benchmarks](#benchmarks) + * [Other Projects Using Badger](#other-projects-using-badger) + * [Frequently Asked Questions](#frequently-asked-questions) + +## Getting Started + +### Installing +To start using Badger, install Go 1.8 or above and run `go get`: + +```sh +$ go get github.com/dgraph-io/badger/... +``` + +This will retrieve the library and install the `badger_info` command line +utility into your `$GOBIN` path. + + +### Opening a database +The top-level object in Badger is a `DB`. It represents multiple files on disk +in specific directories, which contain the data for a single database. + +To open your database, use the `badger.Open()` function, with the appropriate +options. The `Dir` and `ValueDir` options are mandatory and must be +specified by the client. They can be set to the same value to simplify things. + +```go +package main + +import ( + "log" + + "github.com/dgraph-io/badger" +) + +func main() { + // Open the Badger database located in the /tmp/badger directory. + // It will be created if it doesn't exist. + opts := badger.DefaultOptions + opts.Dir = "/tmp/badger" + opts.ValueDir = "/tmp/badger" + db, err := badger.Open(opts) + if err != nil { + log.Fatal(err) + } + defer db.Close() +  // Your code here… +} +``` + +Please note that Badger obtains a lock on the directories so multiple processes +cannot open the same database at the same time. + +### Transactions + +#### Read-only transactions +To start a read-only transaction, you can use the `DB.View()` method: + +```go +err := db.View(func(txn *badger.Txn) error { +  // Your code here… +  return nil +}) +``` + +You cannot perform any writes or deletes within this transaction. Badger +ensures that you get a consistent view of the database within this closure. Any +writes that happen elsewhere after the transaction has started, will not be +seen by calls made within the closure. + +#### Read-write transactions +To start a read-write transaction, you can use the `DB.Update()` method: + +```go +err := db.Update(func(txn *badger.Txn) error { +  // Your code here… +  return nil +}) +``` + +All database operations are allowed inside a read-write transaction. + +Always check the returned error value. If you return an error +within your closure it will be passed through. + +An `ErrConflict` error will be reported in case of a conflict. 
Depending on the state +of your application, you have the option to retry the operation if you receive +this error. + +An `ErrTxnTooBig` will be reported in case the number of pending writes/deletes in +the transaction exceed a certain limit. In that case, it is best to commit the +transaction and start a new transaction immediately. Here is an example (we are +not checking for errors in some places for simplicity): + +```go +updates := make(map[string]string) +txn := db.NewTransaction(true) +for k,v := range updates { + if err := txn.Set([]byte(k),[]byte(v)); err == ErrTxnTooBig { + _ = txn.Commit() + txn = db.NewTransaction(..) + _ = txn.Set([]byte(k),[]byte(v)) + } +} +_ = txn.Commit() +``` + +#### Managing transactions manually +The `DB.View()` and `DB.Update()` methods are wrappers around the +`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of +read-only transactions). These helper methods will start the transaction, +execute a function, and then safely discard your transaction if an error is +returned. This is the recommended way to use Badger transactions. + +However, sometimes you may want to manually create and commit your +transactions. You can use the `DB.NewTransaction()` function directly, which +takes in a boolean argument to specify whether a read-write transaction is +required. For read-write transactions, it is necessary to call `Txn.Commit()` +to ensure the transaction is committed. For read-only transactions, calling +`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()` +internally to cleanup the transaction, so just calling `Txn.Commit()` is +sufficient for read-write transaction. However, if your code doesn’t call +`Txn.Commit()` for some reason (for e.g it returns prematurely with an error), +then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the +code below. + +```go +// Start a writable transaction. +txn := db.NewTransaction(true) +defer txn.Discard() + +// Use the transaction... +err := txn.Set([]byte("answer"), []byte("42")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := txn.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.NewTransaction()` is a boolean stating if the transaction +should be writable. + +Badger allows an optional callback to the `Txn.Commit()` method. Normally, the +callback can be set to `nil`, and the method will return after all the writes +have succeeded. However, if this callback is provided, the `Txn.Commit()` +method returns as soon as it has checked for any conflicts. The actual writing +to the disk happens asynchronously, and the callback is invoked once the +writing has finished, or an error has occurred. This can improve the throughput +of the application in some cases. But it also means that a transaction is not +durable until the callback has been invoked with a `nil` error value. + +### Using key/value pairs +To save a key/value pair, use the `Txn.Set()` method: + +```go +err := db.Update(func(txn *badger.Txn) error { + err := txn.Set([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"`. To retrieve this +value, we can use the `Txn.Get()` method: + +```go +err := db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte("answer")) + handle(err) + + var valNot, valCopy []byte + err := item.Value(func(val []byte) error { + // This func with val would only be called if item.Value encounters no error. 
+ + // Accessing val here is valid. + fmt.Printf("The answer is: %s\n", val) + + // Copying or parsing val is valid. + valCopy = append([]byte{}, val...) + + // Assigning val slice to another variable is NOT OK. + valNot = val // Do not do this. + return nil + }) + handle(err) + + // DO NOT access val here. It is the most common cause of bugs. + fmt.Printf("NEVER do this. %s\n", valNot) + + // You must copy it to use it outside item.Value(...). + fmt.Printf("The answer is: %s\n", valCopy) + + // Alternatively, you could also use item.ValueCopy(). + valCopy, err = item.ValueCopy(nil) + handle(err) + fmt.Printf("The answer is: %s\n", valCopy) + + return nil +}) +``` + +`Txn.Get()` returns `ErrKeyNotFound` if the value is not found. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + +Use the `Txn.Delete()` method to delete a key. + +### Monotonically increasing integers + +To get unique monotonically increasing integers with strong durability, you can +use the `DB.GetSequence` method. This method returns a `Sequence` object, which +is thread-safe and can be used concurrently via various goroutines. + +Badger would lease a range of integers to hand out from memory, with the +bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are +done is determined by this lease bandwidth and the frequency of `Next` +invocations. Setting a bandwith too low would do more disk writes, setting it +too high would result in wasted integers if Badger is closed or crashes. +To avoid wasted integers, call `Release` before closing Badger. + +```go +seq, err := db.GetSequence(key, 1000) +defer seq.Release() +for { + num, err := seq.Next() +} +``` + +### Merge Operations +Badger provides support for unordered merge operations. You can define a func +of type `MergeFunc` which takes in an existing value, and a value to be +_merged_ with it. It returns a new value which is the result of the _merge_ +operation. All values are specified in byte arrays. For e.g., here is a merge +function (`add`) which adds a `uint64` value to an existing `uint64` value. + +```Go +func uint64ToBytes(i uint64) []byte { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], i) + return buf[:] +} + +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Merge function to add two uint64 numbers +func add(existing, new []byte) []byte { + return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new)) +} +``` + +This function can then be passed to the `DB.GetMergeOperator()` method, along +with a key, and a duration value. The duration specifies how often the merge +function is run on values that have been added using the `MergeOperator.Add()` +method. + +`MergeOperator.Get()` method can be used to retrieve the cumulative value of the key +associated with the merge operation. + +```Go +key := []byte("merge") +m := db.GetMergeOperator(key, add, 200*time.Millisecond) +defer m.Stop() + +m.Add(uint64ToBytes(1)) +m.Add(uint64ToBytes(2)) +m.Add(uint64ToBytes(3)) + +res, err := m.Get() // res should have value 6 encoded +fmt.Println(bytesToUint64(res)) +``` + +### Setting Time To Live(TTL) and User Metadata on Keys +Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has +elapsed, the key will no longer be retrievable and will be eligible for garbage +collection. 
A TTL can be set as a `time.Duration` value using the `Txn.SetWithTTL()` +API method. + +An optional user metadata value can be set on each key. A user metadata value +is represented by a single byte. It can be used to set certain bits along +with the key to aid in interpreting or decoding the key-value pair. User +metadata can be set using the `Txn.SetWithMeta()` API method. + +`Txn.SetEntry()` can be used to set the key, value, user metadata and TTL, +all at once. + +### Iterating over keys +To iterate over keys, we can use an `Iterator`, which can be obtained using the +`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting +order. + + +```go +err := db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchSize = 10 + it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + k := item.Key() + err := item.Value(func(v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + if err != nil { + return err + } + } + return nil +}) +``` + +The iterator allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +By default, Badger prefetches the values of the next 100 items. You can adjust +that with the `IteratorOptions.PrefetchSize` field. However, setting it to +a value higher than GOMAXPROCS (which we recommend to be 128 or higher) +shouldn’t give any additional benefits. You can also turn off the fetching of +values altogether. See the section below on key-only iteration. + +#### Prefix scans +To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`: + +```go +db.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + prefix := []byte("1234") + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + item := it.Item() + k := item.Key() + err := item.Value(func(v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + if err != nil { + return err + } + } + return nil +}) +``` + +#### Key-only iteration +Badger supports a unique mode of iteration called _key-only_ iteration. It is +several orders of magnitude faster than regular iteration, because it involves +access to the LSM-tree only, which is usually resident entirely in RAM. To +enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues` +field to `false`. This can also be used to do sparse reads for selected keys +during an iteration, by calling `item.Value()` only when required. + +```go +err := db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchValues = false + it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + k := item.Key() + fmt.Printf("key=%s\n", k) + } + return nil +}) +``` + +### Stream +Badger provides a Stream framework, which concurrently iterates over all or a +portion of the DB, converting data into custom key-values, and streams it out +serially to be sent over the network, written to disk, or even written back to +Badger. This is a much faster way to iterate over Badger than using a single +Iterator. Stream supports Badger in both managed and normal modes. + +Stream uses the natural boundaries created by SSTables within the LSM tree, to +quickly generate key ranges. Each goroutine then picks a range and runs an +iterator to iterate over it.
Each iterator iterates over all versions of values +and is created from the same transaction, thus working over a snapshot of the +DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed +by `KeyToList(key, itr)`. This allows a user to select or reject that key, and +if selected, convert the value versions into custom key-values. The goroutine +batches up 4MB worth of key-values, before sending it over to a channel. +Another goroutine further batches up data from this channel using *smart +batching* algorithm and calls `Send` serially. + +This framework is designed for high throughput key-value iteration, spreading +the work of iteration across many goroutines. `DB.Backup` uses this framework to +provide full and incremental backups quickly. Dgraph is a heavy user of this +framework. In fact, this framework was developed and used within Dgraph, before +getting ported over to Badger. + +```go +stream := db.NewStream() +// db.NewStreamAt(readTs) for managed mode. + +// -- Optional settings +stream.NumGo = 16 // Set number of goroutines to use for iteration. +stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB. +stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger. + +// ChooseKey is called concurrently for every key. If left nil, assumes true by default. +stream.ChooseKey = func(item *badger.Item) bool { + return bytes.HasSuffix(item.Key(), []byte("er")) +} + +// KeyToList is called concurrently for chosen keys. This can be used to convert +// Badger data into custom key-values. If nil, uses stream.ToList, a default +// implementation, which picks all valid key-values. +stream.KeyToList = nil + +// -- End of optional settings. + +// Send is called serially, while Stream.Orchestrate is running. +stream.Send = func(list *pb.KVList) error { + return proto.MarshalText(w, list) // Write to w. +} + +// Run the stream +if err := stream.Orchestrate(context.Background()); err != nil { + return err +} +// Done. +``` + +### Garbage Collection +Badger values need to be garbage collected, because of two reasons: + +* Badger keeps values separately from the LSM tree. This means that the compaction operations +that clean up the LSM tree do not touch the values at all. Values need to be cleaned up +separately. + +* Concurrent read/write transactions could leave behind multiple values for a single key, because they +are stored with different versions. These could accumulate, and take up unneeded space beyond the +time these older versions are needed. + +Badger relies on the client to perform garbage collection at a time of their choosing. It provides +the following method, which can be invoked at an appropriate time: + +* `DB.RunValueLogGC()`: This method is designed to do garbage collection while + Badger is online. Along with randomly picking a file, it uses statistics generated by the + LSM-tree compactions to pick files that are likely to lead to maximum space + reclamation. It is recommended to be called during periods of low activity in + your system, or periodically. One call would only result in removal of at max + one log file. As an optimization, you could also immediately re-run it whenever + it returns nil error (indicating a successful value log GC), as shown below. 
+ + ```go + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + for range ticker.C { + again: + err := db.RunValueLogGC(0.7) + if err == nil { + goto again + } + } + ``` + +* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys. + +**Note: The RunValueLogGC method would not garbage collect the latest value log.** + +### Database backup +There are two public API methods `DB.Backup()` and `DB.Load()` which can be +used to do online backups and restores. Badger v0.9 provides a CLI tool +`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin` +in your PATH to use this tool. + +The command below will create a version-agnostic backup of the database, to a +file `badger.bak` in the current working directory. + +``` +badger backup --dir +``` + +To restore `badger.bak` in the current working directory to a new database: + +``` +badger restore --dir +``` + +See `badger --help` for more details. + +If you have a Badger database that was created using v0.8 (or below), you can +use the `badger_backup` tool provided in v0.8.1, and then restore it using the +command above to upgrade your database to work with the latest version. + +``` +badger_backup --dir --backup-file badger.bak +``` + +We recommend that all users use the `Backup` and `Restore` APIs and tools. However, +Badger is also rsync-friendly because all files are immutable, barring the +latest value log which is append-only. So, rsync can be used as a rudimentary way +to perform a backup. In the following script, we repeat rsync to ensure that the +LSM tree remains consistent with the MANIFEST file while doing a full backup. + +``` +#!/bin/bash +set -o history +set -o histexpand +# Makes a complete copy of a Badger database directory. +# Repeat rsync if the MANIFEST and SSTables are updated. +rsync -avz --delete db/ dst +while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done +``` + +### Memory usage +Badger's memory usage can be managed by tweaking several options available in +the `Options` struct that is passed in when opening the database using +`DB.Open`. + +- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the + default `options.MemoryMap`) to avoid memory-mapping log files. This can be + useful in environments with low RAM. +- Number of memtables (`Options.NumMemtables`) + - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and + `Options.NumLevelZeroTablesStall` accordingly. +- Number of concurrent compactions (`Options.NumCompactors`) +- Mode in which LSM tree is loaded (`Options.TableLoadingMode`) +- Size of table (`Options.MaxTableSize`) +- Size of value log file (`Options.ValueLogFileSize`) + +If you want to decrease the memory usage of a Badger instance, tweak these +options (ideally one at a time) until you achieve the desired +memory usage. + +### Statistics +Badger records metrics using the [expvar] package, which is included in the Go +standard library. All the metrics are documented in the [y/metrics.go][metrics] +file. + +The `expvar` package adds a handler to the default HTTP server (which has to be +started explicitly), and serves up the metrics at the `/debug/vars` endpoint. +These metrics can then be collected by a system like [Prometheus], to get +better visibility into what Badger is doing.
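Since these metrics are plain `expvar` variables, one quick way to inspect them is to start the default HTTP server yourself and fetch `/debug/vars`. The sketch below is illustrative only and uses just the standard library plus the `badger` import shown earlier in this README; the `/tmp/badger` paths and the `:8080` address are arbitrary placeholders.

```go
package main

import (
	_ "expvar" // blank import registers the /debug/vars handler on http.DefaultServeMux
	"log"
	"net/http"

	"github.com/dgraph-io/badger"
)

func main() {
	// Open Badger as in the earlier examples; its expvar metrics are
	// registered as soon as the package is loaded.
	opts := badger.DefaultOptions
	opts.Dir = "/tmp/badger"
	opts.ValueDir = "/tmp/badger"
	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Serving the default mux exposes the metrics at
	// http://localhost:8080/debug/vars.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

With this running, `curl http://localhost:8080/debug/vars` should show the Badger counters alongside the standard Go runtime variables.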
+ +[expvar]: https://golang.org/pkg/expvar/ +[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go +[Prometheus]: https://prometheus.io/ + +## Resources + +### Blog Posts +1. [Introducing Badger: A fast key-value store written natively in +Go](https://open.dgraph.io/post/badger/) +2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/) +3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/) +4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) + +## Design +Badger was written with these design goals in mind: + +- Write a key-value database in pure Go. +- Use latest research to build the fastest KV database for data sets spanning terabytes. +- Optimize for SSDs. + +Badger’s design is based on a paper titled _[WiscKey: Separating Keys from +Values in SSD-conscious Storage][wisckey]_. + +[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf + +### Comparisons +| Feature | Badger | RocksDB | BoltDB | +| ------- | ------ | ------- | ------ | +| Design | LSM tree with value log | LSM tree only | B+ tree | +| High Read throughput | Yes | No | Yes | +| High Write throughput | Yes | Yes | No | +| Designed for SSDs | Yes (with latest research 1) | Not specifically 2 | No | +| Embeddable | Yes | Yes | Yes | +| Sorted KV access | Yes | Yes | Yes | +| Pure Go (no Cgo) | Yes | No | Yes | +| Transactions | Yes, ACID, concurrent with SSI3 | Yes (but non-ACID) | Yes, ACID | +| Snapshots | Yes | Yes | Yes | +| TTL support | Yes | Yes | No | +| 3D access (key-value-version) | Yes4 | No | No | + +1 The [WISCKEY paper][wisckey] (on which Badger is based) saw big +wins with separating values from keys, significantly reducing the write +amplification compared to a typical LSM tree. + +2 RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks. +As such RocksDB's design isn't aimed at SSDs. + +3 SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) + +4 Badger provides direct access to value versions via its Iterator API. +Users can also specify how many versions to keep per key via Options. + +### Benchmarks +We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The +benchmarking code, and the detailed logs for the benchmarks can be found in the +[badger-bench] repo. More explanation, including graphs can be found the blog posts (linked +above). + +[badger-bench]: https://github.com/dgraph-io/badger-bench + +## Other Projects Using Badger +Below is a list of known projects that use Badger: + +* [0-stor](https://github.com/zero-os/0-stor) - Single device object store. +* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database. +* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics. +* [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue. +* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger. +* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol. +* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go. 
+* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger. +* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go. +* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol. +* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft. +* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine. +* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications. +* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain. +* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language. +* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots. +* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform. +* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains. +* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp. +* [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications. +* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects. +* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger + +If you are using Badger in a project please send a pull request to add it to the list. + +## Frequently Asked Questions +- **My writes are getting stuck. Why?** + +**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer +happen.** + +The following is true for users on Badger v1.x. + +This can happen if a long running iteration with `Prefetch` is set to false, but +a `Item::Value` call is made internally in the loop. That causes Badger to +acquire read locks over the value log files to avoid value log GC removing the +file from underneath. As a side effect, this also blocks a new value log GC +file from being created, when the value log file boundary is hit. + +Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293) +and [#315](https://github.com/dgraph-io/badger/issues/315). + +There are multiple workarounds during iteration: + +1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving value. +1. Set `Prefetch` to true. Badger would then copy over the value and release the + file lock immediately. +1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only + iteration. This might be useful if you just want to delete a lot of keys. +1. Do the writes in a separate transaction after the reads. + +- **My writes are really slow. Why?** + +Are you creating a new transaction for every single key update, and waiting for +it to `Commit` fully before creating a new one? This will lead to very low +throughput. + +We have created `WriteBatch` API which provides a way to batch up +many updates into a single transaction and `Commit` that transaction using +callbacks to avoid blocking. 
This amortizes the cost of a transaction really +well, and provides the most efficient way to do bulk writes. + +```go +wb := db.NewWriteBatch() +defer wb.Cancel() + +for i := 0; i < N; i++ { + err := wb.Set(key(i), value(i), 0) // Will create txns as needed. + handle(err) +} +handle(wb.Flush()) // Wait for all txns to finish. +``` + +Note that the `WriteBatch` API does not allow any reads. For read-modify-write +workloads, you should be using the `Transaction` API. + +- **I don't see any disk write. Why?** + +If you're using Badger with `SyncWrites=false`, then your writes might not be written to the value log +and won't get synced to disk immediately. Writes to the LSM tree are done in memory first, before they +get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if +you're doing a few writes and then checking, you might not see anything on disk. Once you `Close` +the database, you'll see these writes on disk. + +- **Reverse iteration doesn't give me the right results.** + +Just like forward iteration goes to the first key which is equal to or greater than the SEEK key, reverse iteration goes to the first key which is equal to or less than the SEEK key. Therefore, the SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347). + +- **Which instances should I use for Badger?** + +We recommend using instances which provide local SSD storage, without any limit +on the maximum IOPS. In AWS, these are storage optimized instances like i3. They +provide local SSDs which clock 100K IOPS over 4KB blocks easily. + +- **I'm getting a closed channel error. Why?** + +``` +panic: close of closed channel +panic: send on closed channel +``` + +If you're seeing panics like above, this would be because you're operating on a closed DB. This can happen if you call `Close()` before sending a write, or call it multiple times. You should ensure that you only call `Close()` once, and all your read/write operations finish before closing. + +- **Are there any Go-specific settings that I should use?** + +We *highly* recommend setting a high number for GOMAXPROCS, which allows Go to +observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set +it to 128. For more details, [see this +thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion). + +- **Are there any Linux-specific settings that I should use?** + +We recommend setting max file descriptors to a high number depending upon the expected size of your data. + +## Contact +- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions. +- Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests. +- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io). +- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). + diff --git a/vendor/github.com/dgraph-io/badger/appveyor.yml b/vendor/github.com/dgraph-io/badger/appveyor.yml new file mode 100644 index 00000000..79dac338 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/appveyor.yml @@ -0,0 +1,48 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +# Platform.
+platform: x64 + +clone_folder: c:\gopath\src\github.com\dgraph-io\badger + +# Environment variables +environment: + GOVERSION: 1.8.3 + GOPATH: c:\gopath + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - python --version + +# To run your custom scripts instead of automatic MSBuild +build_script: + # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648 + - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)' + - cd c:\gopath\src\github.com\dgraph-io\badger + - git branch + - go get -t ./... + +# To run your custom scripts instead of automatic tests +test_script: + # Unit tests + - ps: Add-AppveyorTest "Unit Tests" -Outcome Running + - go test -v github.com/dgraph-io/badger/... + - go test -v -vlog_mmap=false github.com/dgraph-io/badger/... + - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed + +notifications: + - provider: Email + to: + - pawan@dgraph.io + on_build_failure: true + on_build_status_changed: true +# to disable deployment +deploy: off + diff --git a/vendor/github.com/dgraph-io/badger/backup.go b/vendor/github.com/dgraph-io/badger/backup.go new file mode 100644 index 00000000..170d4c37 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/backup.go @@ -0,0 +1,226 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "io" + "sync" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the +// DB. For more control over how many goroutines are used to generate the backup, or if you wish to +// backup only a certain range of keys, use Stream.Backup directly. +func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) { + stream := db.NewStream() + stream.LogPrefix = "DB.Backup" + return stream.Backup(w, since) +} + +// Backup dumps a protobuf-encoded list of all entries in the database into the +// given writer, that are newer than the specified version. It returns a +// timestamp indicating when the entries were dumped which can be passed into a +// later invocation to generate an incremental dump, of entries that have been +// added/modified since the last invocation of Stream.Backup(). +// +// This can be used to backup the data in a database at a given point in time. +func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) { + stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) { + list := &pb.KVList{} + for ; itr.Valid(); itr.Next() { + item := itr.Item() + if !bytes.Equal(item.Key(), key) { + return list, nil + } + if item.Version() < since { + // Ignore versions less than given timestamp, or skip older + // versions of the given key. 
+ return list, nil + } + + var valCopy []byte + if !item.IsDeletedOrExpired() { + // No need to copy value, if item is deleted or expired. + var err error + valCopy, err = item.ValueCopy(nil) + if err != nil { + stream.db.opt.Errorf("Key [%x, %d]. Error while fetching value [%v]\n", + item.Key(), item.Version(), err) + return nil, err + } + } + + // clear txn bits + meta := item.meta &^ (bitTxn | bitFinTxn) + kv := &pb.KV{ + Key: item.KeyCopy(nil), + Value: valCopy, + UserMeta: []byte{item.UserMeta()}, + Version: item.Version(), + ExpiresAt: item.ExpiresAt(), + Meta: []byte{meta}, + } + list.Kv = append(list.Kv, kv) + + switch { + case item.DiscardEarlierVersions(): + // If we need to discard earlier versions of this item, add a delete + // marker just below the current version. + list.Kv = append(list.Kv, &pb.KV{ + Key: item.KeyCopy(nil), + Version: item.Version() - 1, + Meta: []byte{bitDelete}, + }) + return list, nil + + case item.IsDeletedOrExpired(): + return list, nil + } + } + return list, nil + } + + var maxVersion uint64 + stream.Send = func(list *pb.KVList) error { + for _, kv := range list.Kv { + if maxVersion < kv.Version { + maxVersion = kv.Version + } + if err := writeTo(kv, w); err != nil { + return err + } + } + return nil + } + + if err := stream.Orchestrate(context.Background()); err != nil { + return 0, err + } + return maxVersion, nil +} + +func writeTo(entry *pb.KV, w io.Writer) error { + if err := binary.Write(w, binary.LittleEndian, uint64(entry.Size())); err != nil { + return err + } + buf, err := entry.Marshal() + if err != nil { + return err + } + _, err = w.Write(buf) + return err +} + +// Load reads a protobuf-encoded list of all entries from a reader and writes +// them to the database. This can be used to restore the database from a backup +// made by calling DB.Backup(). +// +// DB.Load() should be called on a database that is not running any other +// concurrent transactions while it is running. +func (db *DB) Load(r io.Reader) error { + br := bufio.NewReaderSize(r, 16<<10) + unmarshalBuf := make([]byte, 1<<10) + var entries []*Entry + var wg sync.WaitGroup + errChan := make(chan error, 1) + + // func to check for pending error before sending off a batch for writing + batchSetAsyncIfNoErr := func(entries []*Entry) error { + select { + case err := <-errChan: + return err + default: + wg.Add(1) + return db.batchSetAsync(entries, func(err error) { + defer wg.Done() + if err != nil { + select { + case errChan <- err: + default: + } + } + }) + } + } + + for { + var sz uint64 + err := binary.Read(br, binary.LittleEndian, &sz) + if err == io.EOF { + break + } else if err != nil { + return err + } + + if cap(unmarshalBuf) < int(sz) { + unmarshalBuf = make([]byte, sz) + } + + e := &pb.KV{} + if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil { + return err + } + if err = e.Unmarshal(unmarshalBuf[:sz]); err != nil { + return err + } + var userMeta byte + if len(e.UserMeta) > 0 { + userMeta = e.UserMeta[0] + } + entries = append(entries, &Entry{ + Key: y.KeyWithTs(e.Key, e.Version), + Value: e.Value, + UserMeta: userMeta, + ExpiresAt: e.ExpiresAt, + meta: e.Meta[0], + }) + // Update nextTxnTs, memtable stores this timestamp in badger head + // when flushed. 
+ if e.Version >= db.orc.nextTxnTs { + db.orc.nextTxnTs = e.Version + 1 + } + + if len(entries) == 1000 { + if err := batchSetAsyncIfNoErr(entries); err != nil { + return err + } + entries = make([]*Entry, 0, 1000) + } + } + + if len(entries) > 0 { + if err := batchSetAsyncIfNoErr(entries); err != nil { + return err + } + } + wg.Wait() + + select { + case err := <-errChan: + return err + default: + // Mark all versions done up until nextTxnTs. + db.orc.txnMark.Done(db.orc.nextTxnTs - 1) + return nil + } +} diff --git a/vendor/github.com/dgraph-io/badger/backup_test.go b/vendor/github.com/dgraph-io/badger/backup_test.go new file mode 100644 index 00000000..3e8a1150 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/backup_test.go @@ -0,0 +1,519 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" +) + +func TestBackupRestore1(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + db, err := Open(getTestOptions(dir)) + require.NoError(t, err) + + // Write some stuff + entries := []struct { + key []byte + val []byte + userMeta byte + version uint64 + }{ + {key: []byte("answer1"), val: []byte("42"), version: 1}, + {key: []byte("answer2"), val: []byte("43"), userMeta: 1, version: 2}, + } + + err = db.Update(func(txn *Txn) error { + e := entries[0] + err := txn.SetWithMeta(e.key, e.val, e.userMeta) + if err != nil { + return err + } + return nil + }) + require.NoError(t, err) + + err = db.Update(func(txn *Txn) error { + e := entries[1] + err := txn.SetWithMeta(e.key, e.val, e.userMeta) + if err != nil { + return err + } + return nil + }) + require.NoError(t, err) + + // Use different directory. 
+ dir, err = ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + bak, err := ioutil.TempFile(dir, "badgerbak") + require.NoError(t, err) + ts, err := db.Backup(bak, 0) + t.Logf("New ts: %d\n", ts) + require.NoError(t, err) + require.NoError(t, bak.Close()) + require.NoError(t, db.Close()) + + db, err = Open(getTestOptions(dir)) + require.NoError(t, err) + defer db.Close() + bak, err = os.Open(bak.Name()) + require.NoError(t, err) + defer bak.Close() + + require.NoError(t, db.Load(bak)) + + err = db.View(func(txn *Txn) error { + opts := DefaultIteratorOptions + opts.AllVersions = true + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + require.Equal(t, entries[count].key, item.Key()) + require.Equal(t, entries[count].val, val) + require.Equal(t, entries[count].version, item.Version()) + require.Equal(t, entries[count].userMeta, item.UserMeta()) + count++ + } + require.Equal(t, count, 2) + return nil + }) + require.NoError(t, err) +} + +func TestBackupRestore2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "badger-test") + if err != nil { + t.Fatal(err) + } + defer func() { + os.RemoveAll(tmpdir) + }() + + s1Path := filepath.Join(tmpdir, "test1") + s2Path := filepath.Join(tmpdir, "test2") + s3Path := filepath.Join(tmpdir, "test3") + + opts := DefaultOptions + opts.Dir = s1Path + opts.ValueDir = s1Path + db1, err := Open(opts) + if err != nil { + t.Fatal(err) + } + key1 := []byte("key1") + key2 := []byte("key2") + rawValue := []byte("NotLongValue") + N := byte(251) + err = db1.Update(func(tx *Txn) error { + if err := tx.Set(key1, rawValue); err != nil { + return err + } + return tx.Set(key2, rawValue) + }) + if err != nil { + t.Fatal(err) + } + for i := byte(1); i < N; i++ { + err = db1.Update(func(tx *Txn) error { + if err := tx.Set(append(key1, i), rawValue); err != nil { + return err + } + return tx.Set(append(key2, i), rawValue) + }) + if err != nil { + t.Fatal(err) + } + } + var backup bytes.Buffer + _, err = db1.Backup(&backup, 0) + if err != nil { + t.Fatal(err) + } + fmt.Println("backup1 length:", backup.Len()) + + opts = DefaultOptions + opts.Dir = s2Path + opts.ValueDir = s2Path + db2, err := Open(opts) + if err != nil { + t.Fatal(err) + } + err = db2.Load(&backup) + if err != nil { + t.Fatal(err) + } + + for i := byte(1); i < N; i++ { + err = db2.View(func(tx *Txn) error { + k := append(key1, i) + item, err := tx.Get(k) + if err != nil { + if err == ErrKeyNotFound { + return fmt.Errorf("Key %q has been not found, but was set\n", k) + } + return err + } + v, err := item.ValueCopy(nil) + if err != nil { + return err + } + if !reflect.DeepEqual(v, rawValue) { + return fmt.Errorf("Values not match, got %v, expected %v", v, rawValue) + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + + for i := byte(1); i < N; i++ { + err = db2.Update(func(tx *Txn) error { + if err := tx.Set(append(key1, i), rawValue); err != nil { + return err + } + return tx.Set(append(key2, i), rawValue) + }) + if err != nil { + t.Fatal(err) + } + } + + backup.Reset() + _, err = db2.Backup(&backup, 0) + if err != nil { + t.Fatal(err) + } + fmt.Println("backup2 length:", backup.Len()) + opts = DefaultOptions + opts.Dir = s3Path + opts.ValueDir = s3Path + db3, err := Open(opts) + if err != nil { + t.Fatal(err) + } + + err = db3.Load(&backup) + if err != nil { + t.Fatal(err) + } + + for i := byte(1); i < N; i++ { + err = 
db3.View(func(tx *Txn) error { + k := append(key1, i) + item, err := tx.Get(k) + if err != nil { + if err == ErrKeyNotFound { + return fmt.Errorf("Key %q has been not found, but was set\n", k) + } + return err + } + v, err := item.ValueCopy(nil) + if err != nil { + return err + } + if !reflect.DeepEqual(v, rawValue) { + return fmt.Errorf("Values not match, got %v, expected %v", v, rawValue) + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + +} + +var randSrc = rand.NewSource(time.Now().UnixNano()) + +func createEntries(n int) []*pb.KV { + entries := make([]*pb.KV, n) + for i := 0; i < n; i++ { + entries[i] = &pb.KV{ + Key: []byte(fmt.Sprint("key", i)), + Value: []byte{1}, + UserMeta: []byte{0}, + Meta: []byte{0}, + } + } + return entries +} + +func populateEntries(db *DB, entries []*pb.KV) error { + return db.Update(func(txn *Txn) error { + var err error + for i, e := range entries { + if err = txn.Set(e.Key, e.Value); err != nil { + return err + } + entries[i].Version = 1 + } + return nil + }) +} + +func TestBackup(t *testing.T) { + var bb bytes.Buffer + + tmpdir, err := ioutil.TempDir("", "badger-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + opts := DefaultOptions + opts.Dir = filepath.Join(tmpdir, "backup0") + opts.ValueDir = opts.Dir + db1, err := Open(opts) + if err != nil { + t.Fatal(err) + } + + N := 1000 + entries := createEntries(N) + require.NoError(t, populateEntries(db1, entries)) + + _, err = db1.Backup(&bb, 0) + require.NoError(t, err) + + err = db1.View(func(txn *Txn) error { + opts := DefaultIteratorOptions + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + idx, err := strconv.Atoi(string(item.Key())[3:]) + if err != nil { + return err + } + if idx > N || !bytes.Equal(entries[idx].Key, item.Key()) { + return fmt.Errorf("%s: %s", string(item.Key()), ErrKeyNotFound) + } + count++ + } + if N != count { + return fmt.Errorf("wrong number of items: %d expected, %d actual", N, count) + } + return nil + }) + require.NoError(t, err) +} + +func TestBackupRestore3(t *testing.T) { + var bb bytes.Buffer + + tmpdir, err := ioutil.TempDir("", "badger-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + opts := DefaultOptions + N := 1000 + entries := createEntries(N) + + // backup + { + opts.Dir = filepath.Join(tmpdir, "backup1") + opts.ValueDir = opts.Dir + db1, err := Open(opts) + if err != nil { + t.Fatal(err) + } + + require.NoError(t, populateEntries(db1, entries)) + + _, err = db1.Backup(&bb, 0) + require.NoError(t, err) + require.NoError(t, db1.Close()) + } + require.True(t, len(entries) == N) + require.True(t, bb.Len() > 0) + + // restore + opts.Dir = filepath.Join(tmpdir, "restore1") + opts.ValueDir = opts.Dir + db2, err := Open(opts) + if err != nil { + t.Fatal(err) + } + require.NoError(t, db2.Load(&bb)) + + // verify + err = db2.View(func(txn *Txn) error { + opts := DefaultIteratorOptions + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + idx, err := strconv.Atoi(string(item.Key())[3:]) + if err != nil { + return err + } + if idx > N || !bytes.Equal(entries[idx].Key, item.Key()) { + return fmt.Errorf("%s: %s", string(item.Key()), ErrKeyNotFound) + } + count++ + } + if N != count { + return fmt.Errorf("wrong number of items: %d expected, %d actual", N, count) + } + return nil + }) + require.NoError(t, err) +} + +func TestBackupLoadIncremental(t *testing.T) 
{ + tmpdir, err := ioutil.TempDir("", "badger-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + opts := DefaultOptions + N := 100 + entries := createEntries(N) + updates := make(map[int]byte) + var bb bytes.Buffer + + // backup + { + opts.Dir = filepath.Join(tmpdir, "backup2") + opts.ValueDir = opts.Dir + db1, err := Open(opts) + if err != nil { + t.Fatal(err) + } + + require.NoError(t, populateEntries(db1, entries)) + since, err := db1.Backup(&bb, 0) + require.NoError(t, err) + + ints := rand.New(randSrc).Perm(N) + + // pick 10 items to mark as deleted. + err = db1.Update(func(txn *Txn) error { + for _, i := range ints[:10] { + if err := txn.Delete(entries[i].Key); err != nil { + return err + } + updates[i] = bitDelete + } + return nil + }) + require.NoError(t, err) + since, err = db1.Backup(&bb, since) + require.NoError(t, err) + + // pick 5 items to mark as expired. + err = db1.Update(func(txn *Txn) error { + for _, i := range (ints)[10:15] { + if err := txn.SetWithTTL( + entries[i].Key, entries[i].Value, -time.Hour); err != nil { + return err + } + updates[i] = bitDelete // expired + } + return nil + }) + require.NoError(t, err) + since, err = db1.Backup(&bb, since) + require.NoError(t, err) + + // pick 5 items to mark as discard. + err = db1.Update(func(txn *Txn) error { + for _, i := range ints[15:20] { + if err := txn.SetWithDiscard(entries[i].Key, entries[i].Value, 0); err != nil { + return err + } + updates[i] = bitDiscardEarlierVersions + } + return nil + }) + require.NoError(t, err) + _, err = db1.Backup(&bb, since) + require.NoError(t, err) + require.NoError(t, db1.Close()) + } + require.True(t, len(entries) == N) + require.True(t, bb.Len() > 0) + + // restore + opts.Dir = filepath.Join(tmpdir, "restore2") + opts.ValueDir = opts.Dir + db2, err := Open(opts) + if err != nil { + t.Fatal(err) + } + require.NoError(t, db2.Load(&bb)) + + // verify + actual := make(map[int]byte) + err = db2.View(func(txn *Txn) error { + opts := DefaultIteratorOptions + opts.AllVersions = true + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + idx, err := strconv.Atoi(string(item.Key())[3:]) + if err != nil { + return err + } + if item.IsDeletedOrExpired() { + _, ok := updates[idx] + if !ok { + return fmt.Errorf("%s: not expected to be updated but it is", + string(item.Key())) + } + actual[idx] = item.meta + count++ + continue + } + } + if len(updates) != count { + return fmt.Errorf("mismatched updated items: %d expected, %d actual", + len(updates), count) + } + return nil + }) + require.NoError(t, err, "%v %v", updates, actual) +} diff --git a/vendor/github.com/dgraph-io/badger/badger/.gitignore b/vendor/github.com/dgraph-io/badger/badger/.gitignore new file mode 100644 index 00000000..a8e6bd9e --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/.gitignore @@ -0,0 +1 @@ +/badger diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/backup.go b/vendor/github.com/dgraph-io/badger/badger/cmd/backup.go new file mode 100644 index 00000000..1b47bd4f --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/backup.go @@ -0,0 +1,72 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "os" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var backupFile string +var truncate bool + +// backupCmd represents the backup command +var backupCmd = &cobra.Command{ + Use: "backup", + Short: "Backup Badger database.", + Long: `Backup Badger database to a file in a version-agnostic manner. + +Iterates over each key-value pair, encodes it along with its metadata and +version in protocol buffers and writes them to a file. This file can later be +used by the restore command to create an identical copy of the +database.`, + RunE: doBackup, +} + +func init() { + RootCmd.AddCommand(backupCmd) + backupCmd.Flags().StringVarP(&backupFile, "backup-file", "f", + "badger.bak", "File to backup to") + backupCmd.Flags().BoolVarP(&truncate, "truncate", "t", + false, "Allow value log truncation if required.") +} + +func doBackup(cmd *cobra.Command, args []string) error { + // Open DB + opts := badger.DefaultOptions + opts.Dir = sstDir + opts.ValueDir = vlogDir + opts.Truncate = truncate + db, err := badger.Open(opts) + if err != nil { + return err + } + defer db.Close() + + // Create File + f, err := os.Create(backupFile) + if err != nil { + return err + } + defer f.Close() + + // Run Backup + _, err = db.Backup(f, 0) + return err +} diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/bank.go b/vendor/github.com/dgraph-io/badger/badger/cmd/bank.go new file mode 100644 index 00000000..90039eac --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/bank.go @@ -0,0 +1,451 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "bytes" + "errors" + "fmt" + "log" + "math" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var testCmd = &cobra.Command{ + Use: "bank", + Short: "Run bank test on Badger.", + Long: ` +This command runs bank test on Badger, inspired by Jepsen. It creates many +accounts and moves money among them transactionally. It also reads the sum total +of all the accounts, to ensure that the total never changes. 
+`, +} + +var bankTest = &cobra.Command{ + Use: "test", + Short: "Execute bank test on Badger.", + RunE: runTest, +} + +var bankDisect = &cobra.Command{ + Use: "disect", + Short: "Disect the bank output.", + Long: ` +Disect the bank output BadgerDB to find the first transaction which causes +failure of the total invariant. +`, + RunE: runDisect, +} + +var numGoroutines, numAccounts, numPrevious int +var duration string +var stopAll int32 +var mmap bool + +const keyPrefix = "account:" + +const initialBal uint64 = 100 + +func init() { + RootCmd.AddCommand(testCmd) + testCmd.AddCommand(bankTest) + testCmd.AddCommand(bankDisect) + + testCmd.Flags().IntVarP( + &numAccounts, "accounts", "a", 10000, "Number of accounts in the bank.") + bankTest.Flags().IntVarP( + &numGoroutines, "conc", "c", 16, "Number of concurrent transactions to run.") + bankTest.Flags().StringVarP(&duration, "duration", "d", "3m", "How long to run the test.") + bankTest.Flags().BoolVarP(&mmap, "mmap", "m", false, "If true, mmap LSM tree. Default is RAM.") + bankDisect.Flags().IntVarP(&numPrevious, "previous", "p", 12, + "Starting from the violation txn, how many previous versions to retrieve.") +} + +func key(account int) []byte { + return []byte(fmt.Sprintf("%s%s", keyPrefix, strconv.Itoa(account))) +} + +func toAccount(key []byte) int { + i, err := strconv.Atoi(string(key[len(keyPrefix):])) + y.Check(err) + return i +} + +func toUint64(val []byte) uint64 { + u, err := strconv.ParseUint(string(val), 10, 64) + y.Check(err) + return uint64(u) +} + +func toSlice(bal uint64) []byte { + return []byte(strconv.FormatUint(bal, 10)) +} + +func getBalance(txn *badger.Txn, account int) (uint64, error) { + item, err := txn.Get(key(account)) + if err != nil { + return 0, err + } + + var bal uint64 + err = item.Value(func(v []byte) error { + bal = toUint64(v) + return nil + }) + return bal, err +} + +func putBalance(txn *badger.Txn, account int, bal uint64) error { + return txn.Set(key(account), toSlice(bal)) +} + +func min(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +var errAbandoned = errors.New("Transaction abandonded due to insufficient balance") + +func moveMoney(db *badger.DB, from, to int) error { + return db.Update(func(txn *badger.Txn) error { + balf, err := getBalance(txn, from) + if err != nil { + return err + } + balt, err := getBalance(txn, to) + if err != nil { + return err + } + + floor := min(balf, balt) + if floor < 5 { + return errAbandoned + } + // Move the money. + balf -= 5 + balt += 5 + + if err = putBalance(txn, from, balf); err != nil { + return err + } + return putBalance(txn, to, balt) + }) +} + +type account struct { + Id int + Bal uint64 +} + +func diff(a, b []account) string { + var buf bytes.Buffer + y.AssertTruef(len(a) == len(b), "len(a)=%d. len(b)=%d\n", len(a), len(b)) + for i := range a { + ai := a[i] + bi := b[i] + if ai.Id != bi.Id || ai.Bal != bi.Bal { + buf.WriteString(fmt.Sprintf("Index: %d. Account [%+v] -> [%+v]\n", i, ai, bi)) + } + } + return buf.String() +} + +var errFailure = errors.New("Found an balance mismatch. Test failed.") + +// seekTotal retrives the total of all accounts by seeking for each account key. +func seekTotal(txn *badger.Txn) ([]account, error) { + expected := uint64(numAccounts) * uint64(initialBal) + var accounts []account + + var total uint64 + for i := 0; i < numAccounts; i++ { + item, err := txn.Get(key(i)) + if err != nil { + log.Printf("Error for account: %d. err=%v. 
key=%q\n", i, err, key(i)) + return accounts, err + } + val, err := item.ValueCopy(nil) + if err != nil { + return accounts, err + } + acc := account{ + Id: i, + Bal: toUint64(val), + } + accounts = append(accounts, acc) + total += acc.Bal + } + if total != expected { + log.Printf("Balance did NOT match up. Expected: %d. Received: %d", + expected, total) + atomic.AddInt32(&stopAll, 1) + return accounts, errFailure + } + return accounts, nil +} + +// Range is [lowTs, highTs). +func findFirstInvalidTxn(db *badger.DB, lowTs, highTs uint64) uint64 { + checkAt := func(ts uint64) error { + txn := db.NewTransactionAt(ts, false) + _, err := seekTotal(txn) + txn.Discard() + return err + } + + if highTs-lowTs < 1 { + log.Printf("Checking at lowTs: %d\n", lowTs) + err := checkAt(lowTs) + if err == errFailure { + fmt.Printf("Violation at ts: %d\n", lowTs) + return lowTs + } else if err != nil { + log.Printf("Error at lowTs: %d. Err=%v\n", lowTs, err) + return 0 + } + fmt.Printf("No violation found at ts: %d\n", lowTs) + return 0 + } + + midTs := (lowTs + highTs) / 2 + log.Println() + log.Printf("Checking. low=%d. high=%d. mid=%d\n", lowTs, highTs, midTs) + err := checkAt(midTs) + if err == badger.ErrKeyNotFound || err == nil { + // If no failure, move to higher ts. + return findFirstInvalidTxn(db, midTs+1, highTs) + } + // Found an error. + return findFirstInvalidTxn(db, lowTs, midTs) +} + +func compareTwo(db *badger.DB, before, after uint64) { + fmt.Printf("Comparing @ts=%d with @ts=%d\n", before, after) + txn := db.NewTransactionAt(before, false) + prev, err := seekTotal(txn) + if err == errFailure { + // pass + } else { + y.Check(err) + } + txn.Discard() + + txn = db.NewTransactionAt(after, false) + now, err := seekTotal(txn) + if err == errFailure { + // pass + } else { + y.Check(err) + } + txn.Discard() + + fmt.Println(diff(prev, now)) +} + +func runDisect(cmd *cobra.Command, args []string) error { + opts := badger.DefaultOptions + opts.Dir = sstDir + opts.ValueDir = vlogDir + opts.ReadOnly = true + + // The total did not match up. So, let's disect the DB to find the + // transction which caused the total mismatch. + db, err := badger.OpenManaged(opts) + if err != nil { + return err + } + fmt.Println("opened db") + + var min, max uint64 = math.MaxUint64, 0 + { + txn := db.NewTransactionAt(uint64(math.MaxUint32), false) + iopt := badger.DefaultIteratorOptions + iopt.AllVersions = true + itr := txn.NewIterator(iopt) + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + if min > item.Version() { + min = item.Version() + } + if max < item.Version() { + max = item.Version() + } + } + itr.Close() + txn.Discard() + } + + log.Printf("min=%d. max=%d\n", min, max) + ts := findFirstInvalidTxn(db, min, max) + fmt.Println() + if ts == 0 { + fmt.Println("Nothing found. Exiting.") + return nil + } + + for i := 0; i < numPrevious; i++ { + compareTwo(db, ts-1-uint64(i), ts-uint64(i)) + } + return nil +} + +func runTest(cmd *cobra.Command, args []string) error { + rand.Seed(time.Now().UnixNano()) + + // Open DB + opts := badger.DefaultOptions + opts.Dir = sstDir + opts.ValueDir = vlogDir + opts.MaxTableSize = 4 << 20 // Force more compactions. + opts.NumLevelZeroTables = 2 + opts.NumMemtables = 2 + // Do not GC any versions, because we need them for the disect. + opts.NumVersionsToKeep = int(math.MaxInt32) + opts.ValueThreshold = 1 // Make all values go to value log. 
+ if mmap { + opts.TableLoadingMode = options.MemoryMap + } + log.Printf("Opening DB with options: %+v\n", opts) + + db, err := badger.Open(opts) + if err != nil { + return err + } + defer db.Close() + + wb := db.NewWriteBatch() + for i := 0; i < numAccounts; i++ { + y.Check(wb.Set(key(i), toSlice(initialBal), 0)) + } + log.Println("Waiting for writes to be done...") + y.Check(wb.Flush()) + + log.Println("Bank initialization OK. Commencing test.") + log.Printf("Running with %d accounts, and %d goroutines.\n", numAccounts, numGoroutines) + log.Printf("Using keyPrefix: %s\n", keyPrefix) + + dur, err := time.ParseDuration(duration) + y.Check(err) + + // startTs := time.Now() + endTs := time.Now().Add(dur) + var total, errors, reads uint64 + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for range ticker.C { + if atomic.LoadInt32(&stopAll) > 0 { + // Do not proceed. + return + } + // log.Printf("[%6s] Total: %d. Errors: %d Reads: %d.\n", + // time.Since(startTs).Round(time.Second).String(), + // atomic.LoadUint64(&total), + // atomic.LoadUint64(&errors), + // atomic.LoadUint64(&reads)) + if time.Now().After(endTs) { + return + } + } + }() + + // RW goroutines. + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + ticker := time.NewTicker(10 * time.Microsecond) + defer ticker.Stop() + + for range ticker.C { + if atomic.LoadInt32(&stopAll) > 0 { + // Do not proceed. + return + } + if time.Now().After(endTs) { + return + } + from := rand.Intn(numAccounts) + to := rand.Intn(numAccounts) + if from == to { + continue + } + err := moveMoney(db, from, to) + atomic.AddUint64(&total, 1) + if err == nil { + log.Printf("Moved $5. %d -> %d\n", from, to) + } else { + atomic.AddUint64(&errors, 1) + } + } + }() + } + + // RO goroutine. + wg.Add(1) + go func() { + defer wg.Done() + + ticker := time.NewTicker(10 * time.Microsecond) + defer ticker.Stop() + + for range ticker.C { + if atomic.LoadInt32(&stopAll) > 0 { + // Do not proceed. + return + } + if time.Now().After(endTs) { + return + } + + y.Check(db.View(func(txn *badger.Txn) error { + _, err := seekTotal(txn) + if err != nil { + log.Printf("Error while calculating total: %v", err) + } else { + atomic.AddUint64(&reads, 1) + } + return nil + })) + } + }() + wg.Wait() + + if atomic.LoadInt32(&stopAll) == 0 { + log.Println("Test OK") + return nil + } + log.Println("Test FAILED") + return fmt.Errorf("Test FAILED") +} diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/fill.go b/vendor/github.com/dgraph-io/badger/badger/cmd/fill.go new file mode 100644 index 00000000..6b103bb2 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/fill.go @@ -0,0 +1,93 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "crypto/rand" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var fillCmd = &cobra.Command{ + Use: "fill", + Short: "Fill Badger with random data.", + Long: ` +This command would fill Badger with random data. Useful for testing and performance analysis. +`, + RunE: fill, +} + +var keySz, valSz int +var numKeys float64 +var force bool + +const mil float64 = 1e6 + +func init() { + RootCmd.AddCommand(fillCmd) + fillCmd.Flags().IntVarP(&keySz, "key-size", "k", 32, "Size of key") + fillCmd.Flags().IntVarP(&valSz, "val-size", "v", 128, "Size of value") + fillCmd.Flags().Float64VarP(&numKeys, "keys-mil", "m", 10.0, + "Number of keys to add in millions") + fillCmd.Flags().BoolVarP(&force, "force-compact", "f", true, "Force compact level 0 on close.") +} + +func fill(cmd *cobra.Command, args []string) error { + opts := badger.DefaultOptions + opts.Dir = sstDir + opts.ValueDir = vlogDir + opts.Truncate = truncate + opts.SyncWrites = false + opts.CompactL0OnClose = force + opts.TableLoadingMode = options.FileIO + opts.ValueLogLoadingMode = options.FileIO + + db, err := badger.Open(opts) + if err != nil { + return err + } + defer func() { + start := time.Now() + err := db.Close() + opts.Infof("DB.Close. Error: %v. Time taken: %s", err, time.Since(start)) + }() + + start := time.Now() + batch := db.NewWriteBatch() + num := int64(numKeys * mil) + for i := int64(1); i <= num; i++ { + k := make([]byte, keySz) + v := make([]byte, valSz) + y.Check2(rand.Read(k)) + y.Check2(rand.Read(v)) + if err := batch.Set(k, v, 0); err != nil { + return err + } + if i%1e5 == 0 { + opts.Infof("Written keys: %d\n", i) + } + } + if err := batch.Flush(); err != nil { + return err + } + opts.Infof("%d keys written. Time taken: %s\n", num, time.Since(start)) + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/flatten.go b/vendor/github.com/dgraph-io/badger/badger/cmd/flatten.go new file mode 100644 index 00000000..4b67c982 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/flatten.go @@ -0,0 +1,56 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var flattenCmd = &cobra.Command{ + Use: "flatten", + Short: "Flatten the LSM tree.", + Long: ` +This command would compact all the LSM tables into one level. +`, + RunE: flatten, +} + +var numWorkers int + +func init() { + RootCmd.AddCommand(flattenCmd) + flattenCmd.Flags().IntVarP(&numWorkers, "num-workers", "w", 1, + "Number of concurrent compactors to run. 
More compactors would use more"+ + " server resources to potentially achieve faster compactions.") +} + +func flatten(cmd *cobra.Command, args []string) error { + opts := badger.DefaultOptions + opts.Dir = sstDir + opts.ValueDir = vlogDir + opts.Truncate = truncate + opts.NumCompactors = 0 + + db, err := badger.Open(opts) + if err != nil { + return err + } + defer db.Close() + + return db.Flatten(numWorkers) +} diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/info.go b/vendor/github.com/dgraph-io/badger/badger/cmd/info.go new file mode 100644 index 00000000..ca8c28b5 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/info.go @@ -0,0 +1,294 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "time" + + humanize "gx/ipfs/QmQMxG9D52TirZd9eLA37nxiNspnMRkKbyPWrVAa1gvtSy/go-humanize" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var infoCmd = &cobra.Command{ + Use: "info", + Short: "Health info about Badger database.", + Long: ` +This command prints information about the badger key-value store. It reads MANIFEST and prints its +info. It also prints info about missing/extra files, and general information about the value log +files (which are not referenced by the manifest). Use this tool to report any issues about Badger +to the Dgraph team. 
+`, + Run: func(cmd *cobra.Command, args []string) { + err := printInfo(sstDir, vlogDir) + if err != nil { + fmt.Println("Error:", err.Error()) + os.Exit(1) + } + if !showTables { + return + } + err = tableInfo(sstDir, vlogDir) + if err != nil { + fmt.Println("Error:", err.Error()) + os.Exit(1) + } + }, +} + +var showTables bool + +func init() { + RootCmd.AddCommand(infoCmd) + infoCmd.Flags().BoolVarP(&showTables, "show-tables", "s", false, + "If set to true, show tables as well.") +} + +func hbytes(sz int64) string { + return humanize.Bytes(uint64(sz)) +} + +func dur(src, dst time.Time) string { + return humanize.RelTime(dst, src, "earlier", "later") +} + +func tableInfo(dir, valueDir string) error { + // Open DB + opts := badger.DefaultOptions + opts.TableLoadingMode = options.MemoryMap + opts.Dir = sstDir + opts.ValueDir = vlogDir + opts.ReadOnly = true + + db, err := badger.Open(opts) + if err != nil { + return err + } + defer db.Close() + + tables := db.Tables() + for _, t := range tables { + lk, lv := y.ParseKey(t.Left), y.ParseTs(t.Left) + rk, rv := y.ParseKey(t.Right), y.ParseTs(t.Right) + fmt.Printf("SSTable [L%d, %03d] [%20X, v%-10d -> %20X, v%-10d]\n", + t.Level, t.ID, lk, lv, rk, rv) + } + return nil +} + +func printInfo(dir, valueDir string) error { + if dir == "" { + return fmt.Errorf("--dir not supplied") + } + if valueDir == "" { + valueDir = dir + } + fp, err := os.Open(filepath.Join(dir, badger.ManifestFilename)) + if err != nil { + return err + } + defer func() { + if fp != nil { + fp.Close() + } + }() + manifest, truncOffset, err := badger.ReplayManifestFile(fp) + if err != nil { + return err + } + fp.Close() + fp = nil + + fileinfos, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + fileinfoByName := make(map[string]os.FileInfo) + fileinfoMarked := make(map[string]bool) + for _, info := range fileinfos { + fileinfoByName[info.Name()] = info + fileinfoMarked[info.Name()] = false + } + + fmt.Println() + var baseTime time.Time + // fmt.Print("\n[Manifest]\n") + manifestTruncated := false + manifestInfo, ok := fileinfoByName[badger.ManifestFilename] + if ok { + fileinfoMarked[badger.ManifestFilename] = true + truncatedString := "" + if truncOffset != manifestInfo.Size() { + truncatedString = fmt.Sprintf(" [TRUNCATED to %d]", truncOffset) + manifestTruncated = true + } + + baseTime = manifestInfo.ModTime() + fmt.Printf("[%25s] %-12s %6s MA%s\n", manifestInfo.ModTime().Format(time.RFC3339), + manifestInfo.Name(), hbytes(manifestInfo.Size()), truncatedString) + } else { + fmt.Printf("%s [MISSING]\n", manifestInfo.Name()) + } + + numMissing := 0 + numEmpty := 0 + + levelSizes := make([]int64, len(manifest.Levels)) + for level, lm := range manifest.Levels { + // fmt.Printf("\n[Level %d]\n", level) + // We create a sorted list of table ID's so that output is in consistent order. + tableIDs := make([]uint64, 0, len(lm.Tables)) + for id := range lm.Tables { + tableIDs = append(tableIDs, id) + } + sort.Slice(tableIDs, func(i, j int) bool { + return tableIDs[i] < tableIDs[j] + }) + for _, tableID := range tableIDs { + tableFile := table.IDToFilename(tableID) + tm, ok1 := manifest.Tables[tableID] + file, ok2 := fileinfoByName[tableFile] + if ok1 && ok2 { + fileinfoMarked[tableFile] = true + emptyString := "" + fileSize := file.Size() + if fileSize == 0 { + emptyString = " [EMPTY]" + numEmpty++ + } + levelSizes[level] += fileSize + // (Put level on every line to make easier to process with sed/perl.) 
+ fmt.Printf("[%25s] %-12s %6s L%d %x%s\n", dur(baseTime, file.ModTime()), + tableFile, hbytes(fileSize), level, tm.Checksum, emptyString) + } else { + fmt.Printf("%s [MISSING]\n", tableFile) + numMissing++ + } + } + } + + valueDirFileinfos := fileinfos + if valueDir != dir { + valueDirFileinfos, err = ioutil.ReadDir(valueDir) + if err != nil { + return err + } + } + + // If valueDir is different from dir, holds extra files in the value dir. + valueDirExtras := []os.FileInfo{} + + valueLogSize := int64(0) + // fmt.Print("\n[Value Log]\n") + for _, file := range valueDirFileinfos { + if !strings.HasSuffix(file.Name(), ".vlog") { + if valueDir != dir { + valueDirExtras = append(valueDirExtras, file) + } + continue + } + + fileSize := file.Size() + emptyString := "" + if fileSize == 0 { + emptyString = " [EMPTY]" + numEmpty++ + } + valueLogSize += fileSize + fmt.Printf("[%25s] %-12s %6s VL%s\n", dur(baseTime, file.ModTime()), file.Name(), + hbytes(fileSize), emptyString) + + fileinfoMarked[file.Name()] = true + } + + numExtra := 0 + for _, file := range fileinfos { + if fileinfoMarked[file.Name()] { + continue + } + if numExtra == 0 { + fmt.Print("\n[EXTRA]\n") + } + fmt.Printf("[%s] %-12s %6s\n", file.ModTime().Format(time.RFC3339), + file.Name(), hbytes(file.Size())) + numExtra++ + } + + numValueDirExtra := 0 + for _, file := range valueDirExtras { + if numValueDirExtra == 0 { + fmt.Print("\n[ValueDir EXTRA]\n") + } + fmt.Printf("[%s] %-12s %6s\n", file.ModTime().Format(time.RFC3339), + file.Name(), hbytes(file.Size())) + numValueDirExtra++ + } + + fmt.Print("\n[Summary]\n") + totalIndexSize := int64(0) + for i, sz := range levelSizes { + fmt.Printf("Level %d size: %12s\n", i, hbytes(sz)) + totalIndexSize += sz + } + + fmt.Printf("Total index size: %8s\n", hbytes(totalIndexSize)) + fmt.Printf("Value log size: %10s\n", hbytes(valueLogSize)) + fmt.Println() + totalExtra := numExtra + numValueDirExtra + if totalExtra == 0 && numMissing == 0 && numEmpty == 0 && !manifestTruncated { + fmt.Println("Abnormalities: None.") + } else { + fmt.Println("Abnormalities:") + } + fmt.Printf("%d extra %s.\n", totalExtra, pluralFiles(totalExtra)) + fmt.Printf("%d missing %s.\n", numMissing, pluralFiles(numMissing)) + fmt.Printf("%d empty %s.\n", numEmpty, pluralFiles(numEmpty)) + fmt.Printf("%d truncated %s.\n", boolToNum(manifestTruncated), + pluralManifest(manifestTruncated)) + + return nil +} + +func boolToNum(x bool) int { + if x { + return 1 + } + return 0 +} + +func pluralManifest(manifestTruncated bool) string { + if manifestTruncated { + return "manifest" + } + return "manifests" +} + +func pluralFiles(count int) string { + if count == 1 { + return "file" + } + return "files" +} diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/restore.go b/vendor/github.com/dgraph-io/badger/badger/cmd/restore.go new file mode 100644 index 00000000..fc54e1ce --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/restore.go @@ -0,0 +1,81 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "errors" + "os" + "path" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var restoreFile string + +// restoreCmd represents the restore command +var restoreCmd = &cobra.Command{ + Use: "restore", + Short: "Restore Badger database.", + Long: `Restore Badger database from a file. + +It reads a file generated using the backup command (or by calling the +DB.Backup() API method) and writes each key-value pair found in the file to +the Badger database. + +Restore creates a new database, and currently does not work on an already +existing database.`, + RunE: doRestore, +} + +func init() { + RootCmd.AddCommand(restoreCmd) + restoreCmd.Flags().StringVarP(&restoreFile, "backup-file", "f", + "badger.bak", "File to restore from") +} + +func doRestore(cmd *cobra.Command, args []string) error { + // Check if the DB already exists + manifestFile := path.Join(sstDir, badger.ManifestFilename) + if _, err := os.Stat(manifestFile); err == nil { // No error. File already exists. + return errors.New("Cannot restore to an already existing database") + } else if os.IsNotExist(err) { + // pass + } else { // Return an error if anything other than the error above + return err + } + + // Open DB + opts := badger.DefaultOptions + opts.Dir = sstDir + opts.ValueDir = vlogDir + db, err := badger.Open(opts) + if err != nil { + return err + } + defer db.Close() + + // Open File + f, err := os.Open(restoreFile) + if err != nil { + return err + } + defer f.Close() + + // Run restore + return db.Load(f) +} diff --git a/vendor/github.com/dgraph-io/badger/badger/cmd/root.go b/vendor/github.com/dgraph-io/badger/badger/cmd/root.go new file mode 100644 index 00000000..1506ce53 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/cmd/root.go @@ -0,0 +1,65 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "errors" + "fmt" + "os" + "strings" + + "gx/ipfs/QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT/cobra" +) + +var sstDir, vlogDir string + +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: "badger", + Short: "Tools to manage Badger database.", + PersistentPreRunE: validateRootCmdArgs, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + if err := RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func init() { + RootCmd.PersistentFlags().StringVar(&sstDir, "dir", "", + "Directory where the LSM tree files are located. 
(required)") + + RootCmd.PersistentFlags().StringVar(&vlogDir, "vlog-dir", "", + "Directory where the value log files are located, if different from --dir") +} + +func validateRootCmdArgs(cmd *cobra.Command, args []string) error { + if strings.HasPrefix(cmd.Use, "help ") { // No need to validate if it is help + return nil + } + if sstDir == "" { + return errors.New("--dir not specified") + } + if vlogDir == "" { + vlogDir = sstDir + } + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/badger/main.go b/vendor/github.com/dgraph-io/badger/badger/main.go new file mode 100644 index 00000000..e5864d8f --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/badger/main.go @@ -0,0 +1,42 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "net/http" + _ "net/http/pprof" + "runtime" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/badger/cmd" +) + +func main() { + go func() { + for i := 8080; i < 9080; i++ { + fmt.Printf("Listening for /debug HTTP requests at port: %d\n", i) + if err := http.ListenAndServe(fmt.Sprintf("localhost:%d", i), nil); err != nil { + fmt.Println("Port busy. Trying another one...") + continue + + } + } + }() + runtime.SetBlockProfileRate(100) + runtime.GOMAXPROCS(128) + cmd.Execute() +} diff --git a/vendor/github.com/dgraph-io/badger/batch.go b/vendor/github.com/dgraph-io/badger/batch.go new file mode 100644 index 00000000..2c26d4b0 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/batch.go @@ -0,0 +1,153 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "sync" + "time" +) + +// WriteBatch holds the necessary info to perform batched writes. +type WriteBatch struct { + sync.Mutex + txn *Txn + db *DB + wg sync.WaitGroup + err error +} + +// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes, +// batching them up as tightly as possible in a single transaction and using callbacks to avoid +// waiting for them to commit, thus achieving good performance. This API hides away the logic of +// creating and committing transactions. Due to the nature of SSI guaratees provided by Badger, +// blind writes can never encounter transaction conflicts (ErrConflict). 
+func (db *DB) NewWriteBatch() *WriteBatch { + return &WriteBatch{db: db, txn: db.newTransaction(true, true)} +} + +// Cancel function must be called if there's a chance that Flush might not get +// called. If neither Flush or Cancel is called, the transaction oracle would +// never get a chance to clear out the row commit timestamp map, thus causing an +// unbounded memory consumption. Typically, you can call Cancel as a defer +// statement right after NewWriteBatch is called. +// +// Note that any committed writes would still go through despite calling Cancel. +func (wb *WriteBatch) Cancel() { + wb.wg.Wait() + wb.txn.Discard() +} + +func (wb *WriteBatch) callback(err error) { + // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock. + defer wb.wg.Done() + if err == nil { + return + } + + wb.Lock() + defer wb.Unlock() + if wb.err != nil { + return + } + wb.err = err +} + +// SetEntry is the equivalent of Txn.SetEntry. +func (wb *WriteBatch) SetEntry(e *Entry) error { + wb.Lock() + defer wb.Unlock() + + if err := wb.txn.SetEntry(e); err != ErrTxnTooBig { + return err + } + // Txn has reached it's zenith. Commit now. + if cerr := wb.commit(); cerr != nil { + return cerr + } + // This time the error must not be ErrTxnTooBig, otherwise, we make the + // error permanent. + if err := wb.txn.SetEntry(e); err != nil { + wb.err = err + return err + } + return nil +} + +// Set is equivalent of Txn.SetWithMeta. +func (wb *WriteBatch) Set(k, v []byte, meta byte) error { + e := &Entry{Key: k, Value: v, UserMeta: meta} + return wb.SetEntry(e) +} + +// SetWithTTL is equivalent of Txn.SetWithTTL. +func (wb *WriteBatch) SetWithTTL(key, val []byte, dur time.Duration) error { + expire := time.Now().Add(dur).Unix() + e := &Entry{Key: key, Value: val, ExpiresAt: uint64(expire)} + return wb.SetEntry(e) +} + +// Delete is equivalent of Txn.Delete. +func (wb *WriteBatch) Delete(k []byte) error { + wb.Lock() + defer wb.Unlock() + + if err := wb.txn.Delete(k); err != ErrTxnTooBig { + return err + } + if err := wb.commit(); err != nil { + return err + } + if err := wb.txn.Delete(k); err != nil { + wb.err = err + return err + } + return nil +} + +// Caller to commit must hold a write lock. +func (wb *WriteBatch) commit() error { + if wb.err != nil { + return wb.err + } + // Get a new txn before we commit this one. So, the new txn doesn't need + // to wait for this one to commit. + wb.wg.Add(1) + wb.txn.CommitWith(wb.callback) + wb.txn = wb.db.newTransaction(true, true) + wb.txn.readTs = 0 // We're not reading anything. + return wb.err +} + +// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush +// returns any error stored by WriteBatch. +func (wb *WriteBatch) Flush() error { + wb.Lock() + _ = wb.commit() + wb.txn.Discard() + wb.Unlock() + + wb.wg.Wait() + // Safe to access error without any synchronization here. + return wb.err +} + +// Error returns any errors encountered so far. No commits would be run once an error is detected. +func (wb *WriteBatch) Error() error { + wb.Lock() + defer wb.Unlock() + return wb.err +} diff --git a/vendor/github.com/dgraph-io/badger/batch_test.go b/vendor/github.com/dgraph-io/badger/batch_test.go new file mode 100644 index 00000000..041fe92a --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/batch_test.go @@ -0,0 +1,69 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. 
and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestWriteBatch(t *testing.T) { + key := func(i int) []byte { + return []byte(fmt.Sprintf("%10d", i)) + } + val := func(i int) []byte { + return []byte(fmt.Sprintf("%128d", i)) + } + + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + wb := db.NewWriteBatch() + defer wb.Cancel() + + N, M := 50000, 1000 + start := time.Now() + + for i := 0; i < N; i++ { + require.NoError(t, wb.Set(key(i), val(i), 0)) + } + for i := 0; i < M; i++ { + require.NoError(t, wb.Delete(key(i))) + } + require.NoError(t, wb.Flush()) + t.Logf("Time taken for %d writes (w/ test options): %s\n", N+M, time.Since(start)) + + err := db.View(func(txn *Txn) error { + itr := txn.NewIterator(DefaultIteratorOptions) + defer itr.Close() + + i := M + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + require.Equal(t, string(key(i)), string(item.Key())) + valcopy, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, val(i), valcopy) + i++ + } + require.Equal(t, N, i) + return nil + }) + require.NoError(t, err) + }) +} diff --git a/vendor/github.com/dgraph-io/badger/compaction.go b/vendor/github.com/dgraph-io/badger/compaction.go new file mode 100644 index 00000000..7fd1b174 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/compaction.go @@ -0,0 +1,208 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "fmt" + "log" + "math" + "sync" + + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +type keyRange struct { + left []byte + right []byte + inf bool +} + +var infRange = keyRange{inf: true} + +func (r keyRange) String() string { + return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf) +} + +func (r keyRange) equals(dst keyRange) bool { + return bytes.Equal(r.left, dst.left) && + bytes.Equal(r.right, dst.right) && + r.inf == dst.inf +} + +func (r keyRange) overlapsWith(dst keyRange) bool { + if r.inf || dst.inf { + return true + } + + // If my left is greater than dst right, we have no overlap. 
+ if y.CompareKeys(r.left, dst.right) > 0 { + return false + } + // If my right is less than dst left, we have no overlap. + if y.CompareKeys(r.right, dst.left) < 0 { + return false + } + // We have overlap. + return true +} + +func getKeyRange(tables []*table.Table) keyRange { + y.AssertTrue(len(tables) > 0) + smallest := tables[0].Smallest() + biggest := tables[0].Biggest() + for i := 1; i < len(tables); i++ { + if y.CompareKeys(tables[i].Smallest(), smallest) < 0 { + smallest = tables[i].Smallest() + } + if y.CompareKeys(tables[i].Biggest(), biggest) > 0 { + biggest = tables[i].Biggest() + } + } + return keyRange{ + left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64), + right: y.KeyWithTs(y.ParseKey(biggest), 0), + } +} + +type levelCompactStatus struct { + ranges []keyRange + delSize int64 +} + +func (lcs *levelCompactStatus) debug() string { + var b bytes.Buffer + for _, r := range lcs.ranges { + b.WriteString(r.String()) + } + return b.String() +} + +func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool { + for _, r := range lcs.ranges { + if r.overlapsWith(dst) { + return true + } + } + return false +} + +func (lcs *levelCompactStatus) remove(dst keyRange) bool { + final := lcs.ranges[:0] + var found bool + for _, r := range lcs.ranges { + if !r.equals(dst) { + final = append(final, r) + } else { + found = true + } + } + lcs.ranges = final + return found +} + +type compactStatus struct { + sync.RWMutex + levels []*levelCompactStatus +} + +func (cs *compactStatus) toLog(tr trace.Trace) { + cs.RLock() + defer cs.RUnlock() + + tr.LazyPrintf("Compaction status:") + for i, l := range cs.levels { + if len(l.debug()) == 0 { + continue + } + tr.LazyPrintf("[%d] %s", i, l.debug()) + } +} + +func (cs *compactStatus) overlapsWith(level int, this keyRange) bool { + cs.RLock() + defer cs.RUnlock() + + thisLevel := cs.levels[level] + return thisLevel.overlapsWith(this) +} + +func (cs *compactStatus) delSize(l int) int64 { + cs.RLock() + defer cs.RUnlock() + return cs.levels[l].delSize +} + +type thisAndNextLevelRLocked struct{} + +// compareAndAdd will check whether we can run this compactDef. That it doesn't overlap with any +// other running compaction. If it can be run, it would store this run in the compactStatus state. +func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool { + cs.Lock() + defer cs.Unlock() + + level := cd.thisLevel.level + + y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels)) + thisLevel := cs.levels[level] + nextLevel := cs.levels[level+1] + + if thisLevel.overlapsWith(cd.thisRange) { + return false + } + if nextLevel.overlapsWith(cd.nextRange) { + return false + } + // Check whether this level really needs compaction or not. Otherwise, we'll end up + // running parallel compactions for the same level. + // Update: We should not be checking size here. Compaction priority already did the size checks. + // Here we should just be executing the wish of others. + + thisLevel.ranges = append(thisLevel.ranges, cd.thisRange) + nextLevel.ranges = append(nextLevel.ranges, cd.nextRange) + thisLevel.delSize += cd.thisSize + return true +} + +func (cs *compactStatus) delete(cd compactDef) { + cs.Lock() + defer cs.Unlock() + + level := cd.thisLevel.level + y.AssertTruef(level < len(cs.levels)-1, "Got level %d. 
Max levels: %d", level, len(cs.levels)) + + thisLevel := cs.levels[level] + nextLevel := cs.levels[level+1] + + thisLevel.delSize -= cd.thisSize + found := thisLevel.remove(cd.thisRange) + found = nextLevel.remove(cd.nextRange) && found + + if !found { + this := cd.thisRange + next := cd.nextRange + fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf) + fmt.Printf("This Level:\n%s\n", thisLevel.debug()) + fmt.Println() + fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf) + fmt.Printf("Next Level:\n%s\n", nextLevel.debug()) + log.Fatal("keyRange not found") + } +} diff --git a/vendor/github.com/dgraph-io/badger/contrib/cover.sh b/vendor/github.com/dgraph-io/badger/contrib/cover.sh new file mode 100644 index 00000000..5e2c179a --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/contrib/cover.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +SRC="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.." +TMP=$(mktemp /tmp/badger-coverage-XXXXX.txt) + +BUILD=$1 +OUT=$2 + +set -e + +pushd $SRC &> /dev/null + +# create coverage output +echo 'mode: atomic' > $OUT +for PKG in $(go list ./...|grep -v -E 'vendor'); do + go test -covermode=atomic -coverprofile=$TMP $PKG + tail -n +2 $TMP >> $OUT +done + +# Another round of tests after turning off mmap +go test -v -vlog_mmap=false github.com/dgraph-io/badger + +popd &> /dev/null diff --git a/vendor/github.com/dgraph-io/badger/db.go b/vendor/github.com/dgraph-io/badger/db.go new file mode 100644 index 00000000..8c374bdc --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/db.go @@ -0,0 +1,1296 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "encoding/binary" + "expvar" + "math" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + humanize "gx/ipfs/QmQMxG9D52TirZd9eLA37nxiNspnMRkKbyPWrVAa1gvtSy/go-humanize" + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/skl" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +var ( + badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger. + head = []byte("!badger!head") // For storing value offset for replay. + txnKey = []byte("!badger!txn") // For indicating end of entries in txn. + badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC. +) + +type closers struct { + updateSize *y.Closer + compactors *y.Closer + memtable *y.Closer + writes *y.Closer + valueGC *y.Closer +} + +// DB provides the various functions required to interact with Badger. +// DB is thread-safe. 
+type DB struct { + sync.RWMutex // Guards list of inmemory tables, not individual reads and writes. + + dirLockGuard *directoryLockGuard + // nil if Dir and ValueDir are the same + valueDirGuard *directoryLockGuard + + closers closers + elog trace.EventLog + mt *skl.Skiplist // Our latest (actively written) in-memory table + imm []*skl.Skiplist // Add here only AFTER pushing to flushChan. + opt Options + manifest *manifestFile + lc *levelsController + vlog valueLog + vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt + writeCh chan *request + flushChan chan flushTask // For flushing memtables. + + blockWrites int32 + + orc *oracle +} + +const ( + kvWriteChCapacity = 1000 +) + +func (db *DB) replayFunction() func(Entry, valuePointer) error { + type txnEntry struct { + nk []byte + v y.ValueStruct + } + + var txn []txnEntry + var lastCommit uint64 + + toLSM := func(nk []byte, vs y.ValueStruct) { + for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() { + db.elog.Printf("Replay: Making room for writes") + time.Sleep(10 * time.Millisecond) + } + db.mt.Put(nk, vs) + } + + first := true + return func(e Entry, vp valuePointer) error { // Function for replaying. + if first { + db.elog.Printf("First key=%q\n", e.Key) + } + first = false + + if db.orc.nextTxnTs < y.ParseTs(e.Key) { + db.orc.nextTxnTs = y.ParseTs(e.Key) + } + + nk := make([]byte, len(e.Key)) + copy(nk, e.Key) + var nv []byte + meta := e.meta + if db.shouldWriteValueToLSM(e) { + nv = make([]byte, len(e.Value)) + copy(nv, e.Value) + } else { + nv = make([]byte, vptrSize) + vp.Encode(nv) + meta = meta | bitValuePointer + } + + v := y.ValueStruct{ + Value: nv, + Meta: meta, + UserMeta: e.UserMeta, + } + + if e.meta&bitFinTxn > 0 { + txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) + if err != nil { + return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value) + } + y.AssertTrue(lastCommit == txnTs) + y.AssertTrue(len(txn) > 0) + // Got the end of txn. Now we can store them. + for _, t := range txn { + toLSM(t.nk, t.v) + } + txn = txn[:0] + lastCommit = 0 + + } else if e.meta&bitTxn > 0 { + txnTs := y.ParseTs(nk) + if lastCommit == 0 { + lastCommit = txnTs + } + if lastCommit != txnTs { + db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n", + lastCommit) + txn = txn[:0] + lastCommit = txnTs + } + te := txnEntry{nk: nk, v: v} + txn = append(txn, te) + + } else { + // This entry is from a rewrite. + toLSM(nk, v) + + // We shouldn't get this entry in the middle of a transaction. + y.AssertTrue(lastCommit == 0) + y.AssertTrue(len(txn) == 0) + } + return nil + } +} + +// Open returns a new DB object. +func Open(opt Options) (db *DB, err error) { + opt.maxBatchSize = (15 * opt.MaxTableSize) / 100 + opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize) + + if opt.ValueThreshold > math.MaxUint16-16 { + return nil, ErrValueThreshold + } + + if opt.ReadOnly { + // Can't truncate if the DB is read only. 
+ opt.Truncate = false + } + + for _, path := range []string{opt.Dir, opt.ValueDir} { + dirExists, err := exists(path) + if err != nil { + return nil, y.Wrapf(err, "Invalid Dir: %q", path) + } + if !dirExists { + if opt.ReadOnly { + return nil, y.Wrapf(err, "Cannot find Dir for read-only open: %q", path) + } + // Try to create the directory + err = os.Mkdir(path, 0700) + if err != nil { + return nil, y.Wrapf(err, "Error Creating Dir: %q", path) + } + } + } + absDir, err := filepath.Abs(opt.Dir) + if err != nil { + return nil, err + } + absValueDir, err := filepath.Abs(opt.ValueDir) + if err != nil { + return nil, err + } + var dirLockGuard, valueDirLockGuard *directoryLockGuard + dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly) + if err != nil { + return nil, err + } + defer func() { + if dirLockGuard != nil { + _ = dirLockGuard.release() + } + }() + if absValueDir != absDir { + valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly) + if err != nil { + return nil, err + } + defer func() { + if valueDirLockGuard != nil { + _ = valueDirLockGuard.release() + } + }() + } + if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) { + return nil, ErrValueLogSize + } + if !(opt.ValueLogLoadingMode == options.FileIO || + opt.ValueLogLoadingMode == options.MemoryMap) { + return nil, ErrInvalidLoadingMode + } + manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir, opt.ReadOnly) + if err != nil { + return nil, err + } + defer func() { + if manifestFile != nil { + _ = manifestFile.close() + } + }() + + db = &DB{ + imm: make([]*skl.Skiplist, 0, opt.NumMemtables), + flushChan: make(chan flushTask, opt.NumMemtables), + writeCh: make(chan *request, kvWriteChCapacity), + opt: opt, + manifest: manifestFile, + elog: trace.NewEventLog("Badger", "DB"), + dirLockGuard: dirLockGuard, + valueDirGuard: valueDirLockGuard, + orc: newOracle(opt), + } + + // Calculate initial size. + db.calculateSize() + db.closers.updateSize = y.NewCloser(1) + go db.updateSize(db.closers.updateSize) + db.mt = skl.NewSkiplist(arenaSize(opt)) + + // newLevelsController potentially loads files in directory. + if db.lc, err = newLevelsController(db, &manifest); err != nil { + return nil, err + } + + if !opt.ReadOnly { + db.closers.compactors = y.NewCloser(1) + db.lc.startCompact(db.closers.compactors) + + db.closers.memtable = y.NewCloser(1) + go db.flushMemtable(db.closers.memtable) // Need levels controller to be up. + } + + headKey := y.KeyWithTs(head, math.MaxUint64) + // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key + vs, err := db.get(headKey) + if err != nil { + return nil, errors.Wrap(err, "Retrieving head") + } + db.orc.nextTxnTs = vs.Version + var vptr valuePointer + if len(vs.Value) > 0 { + vptr.Decode(vs.Value) + } + + replayCloser := y.NewCloser(1) + go db.doWrites(replayCloser) + + if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil { + return db, err + } + replayCloser.SignalAndWait() // Wait for replay to be applied first. + + // Let's advance nextTxnTs to one more than whatever we observed via + // replaying the logs. + db.orc.txnMark.Done(db.orc.nextTxnTs) + db.orc.nextTxnTs++ + + db.writeCh = make(chan *request, kvWriteChCapacity) + db.closers.writes = y.NewCloser(1) + go db.doWrites(db.closers.writes) + + db.closers.valueGC = y.NewCloser(1) + go db.vlog.waitOnGC(db.closers.valueGC) + + valueDirLockGuard = nil + dirLockGuard = nil + manifestFile = nil + return db, nil +} + +// Close closes a DB. 
It's crucial to call it to ensure all the pending updates +// make their way to disk. Calling DB.Close() multiple times is not safe and would +// cause panic. +func (db *DB) Close() (err error) { + db.elog.Printf("Closing database") + atomic.StoreInt32(&db.blockWrites, 1) + + // Stop value GC first. + db.closers.valueGC.SignalAndWait() + + // Stop writes next. + db.closers.writes.SignalAndWait() + + // Now close the value log. + if vlogErr := db.vlog.Close(); err == nil { + err = errors.Wrap(vlogErr, "DB.Close") + } + + // Make sure that block writer is done pushing stuff into memtable! + // Otherwise, you will have a race condition: we are trying to flush memtables + // and remove them completely, while the block / memtable writer is still + // trying to push stuff into the memtable. This will also resolve the value + // offset problem: as we push into memtable, we update value offsets there. + if !db.mt.Empty() { + db.elog.Printf("Flushing memtable") + for { + pushedFlushTask := func() bool { + db.Lock() + defer db.Unlock() + y.AssertTrue(db.mt != nil) + select { + case db.flushChan <- flushTask{db.mt, db.vhead}: + db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm. + db.mt = nil // Will segfault if we try writing! + db.elog.Printf("pushed to flush chan\n") + return true + default: + // If we fail to push, we need to unlock and wait for a short while. + // The flushing operation needs to update s.imm. Otherwise, we have a deadlock. + // TODO: Think about how to do this more cleanly, maybe without any locks. + } + return false + }() + if pushedFlushTask { + break + } + time.Sleep(10 * time.Millisecond) + } + } + db.stopCompactions() + + // Force Compact L0 + // We don't need to care about cstatus since no parallel compaction is running. + if db.opt.CompactL0OnClose { + if err := db.lc.doCompact(compactionPriority{level: 0, score: 1.73}); err != nil { + db.opt.Warningf("While forcing compaction on level 0: %v", err) + } else { + db.opt.Infof("Force compaction on level 0 done") + } + } + + if lcErr := db.lc.close(); err == nil { + err = errors.Wrap(lcErr, "DB.Close") + } + db.elog.Printf("Waiting for closer") + db.closers.updateSize.SignalAndWait() + db.orc.Stop() + + db.elog.Finish() + + if db.dirLockGuard != nil { + if guardErr := db.dirLockGuard.release(); err == nil { + err = errors.Wrap(guardErr, "DB.Close") + } + } + if db.valueDirGuard != nil { + if guardErr := db.valueDirGuard.release(); err == nil { + err = errors.Wrap(guardErr, "DB.Close") + } + } + if manifestErr := db.manifest.close(); err == nil { + err = errors.Wrap(manifestErr, "DB.Close") + } + + // Fsync directories to ensure that lock file, and any other removed files whose directory + // we haven't specifically fsynced, are guaranteed to have their directory entry removal + // persisted to disk. + if syncErr := syncDir(db.opt.Dir); err == nil { + err = errors.Wrap(syncErr, "DB.Close") + } + if syncErr := syncDir(db.opt.ValueDir); err == nil { + err = errors.Wrap(syncErr, "DB.Close") + } + + return err +} + +const ( + lockFile = "LOCK" +) + +// When you create or delete a file, you have to ensure the directory entry for the file is synced +// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync, +// or see https://github.com/coreos/etcd/issues/6368 for an example.) 
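+//
+// In this file the pattern shows up in handleFlushTask: a new level-0 table
+// file is created and written, and the containing directory is synced via
+// syncDir(db.opt.Dir) so the file's directory entry itself is durable. A rough
+// sketch of the pattern (the file name is illustrative only):
+//
+//	f, _ := os.Create(filepath.Join(dir, "00001.sst"))
+//	// ... write to f, then f.Sync() and f.Close() ...
+//	_ = syncDir(dir) // make the new directory entry crash-safe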
+func syncDir(dir string) error { + f, err := openDir(dir) + if err != nil { + return errors.Wrapf(err, "While opening directory: %s.", dir) + } + err = f.Sync() + closeErr := f.Close() + if err != nil { + return errors.Wrapf(err, "While syncing directory: %s.", dir) + } + return errors.Wrapf(closeErr, "While closing directory: %s.", dir) +} + +// getMemtables returns the current memtables and get references. +func (db *DB) getMemTables() ([]*skl.Skiplist, func()) { + db.RLock() + defer db.RUnlock() + + tables := make([]*skl.Skiplist, len(db.imm)+1) + + // Get mutable memtable. + tables[0] = db.mt + tables[0].IncrRef() + + // Get immutable memtables. + last := len(db.imm) - 1 + for i := range db.imm { + tables[i+1] = db.imm[last-i] + tables[i+1].IncrRef() + } + return tables, func() { + for _, tbl := range tables { + tbl.DecrRef() + } + } +} + +// get returns the value in memtable or disk for given key. +// Note that value will include meta byte. +// +// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to +// maintain this invariant to search for the latest value of a key, or else we need to search in all +// tables and find the max version among them. To maintain this invariant, we also need to ensure +// that all versions of a key are always present in the same table from level 1, because compaction +// can push any table down. +// +// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one +// value log to another (while reclaiming space during value log GC), we have logically moved this +// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal +// gets, we can stop going down the LSM tree once we find any version of the key (note however that +// we will ALWAYS skip versions with ts greater than the key version). However, if that key has +// been moved, then for the corresponding movekey, we'll look through all the levels of the tree +// to ensure that we pick the highest version of the movekey present. +func (db *DB) get(key []byte) (y.ValueStruct, error) { + tables, decr := db.getMemTables() // Lock should be released. + defer decr() + + var maxVs *y.ValueStruct + var version uint64 + if bytes.HasPrefix(key, badgerMove) { + // If we are checking badgerMove key, we should look into all the + // levels, so we can pick up the newer versions, which might have been + // compacted down the tree. + maxVs = &y.ValueStruct{} + version = y.ParseTs(key) + } + + y.NumGets.Add(1) + for i := 0; i < len(tables); i++ { + vs := tables[i].Get(key) + y.NumMemtableGets.Add(1) + if vs.Meta == 0 && vs.Value == nil { + continue + } + // Found a version of the key. For user keyspace, return immediately. For move keyspace, + // continue iterating, unless we found a version == given key version. 
+ if maxVs == nil || vs.Version == version { + return vs, nil + } + if maxVs.Version < vs.Version { + *maxVs = vs + } + } + return db.lc.get(key, maxVs) +} + +func (db *DB) updateHead(ptrs []valuePointer) { + var ptr valuePointer + for i := len(ptrs) - 1; i >= 0; i-- { + p := ptrs[i] + if !p.IsZero() { + ptr = p + break + } + } + if ptr.IsZero() { + return + } + + db.Lock() + defer db.Unlock() + y.AssertTrue(!ptr.Less(db.vhead)) + db.vhead = ptr +} + +var requestPool = sync.Pool{ + New: func() interface{} { + return new(request) + }, +} + +func (db *DB) shouldWriteValueToLSM(e Entry) bool { + return len(e.Value) < db.opt.ValueThreshold +} + +func (db *DB) writeToLSM(b *request) error { + if len(b.Ptrs) != len(b.Entries) { + return errors.Errorf("Ptrs and Entries don't match: %+v", b) + } + + for i, entry := range b.Entries { + if entry.meta&bitFinTxn != 0 { + continue + } + if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case. + db.mt.Put(entry.Key, + y.ValueStruct{ + Value: entry.Value, + Meta: entry.meta, + UserMeta: entry.UserMeta, + ExpiresAt: entry.ExpiresAt, + }) + } else { + var offsetBuf [vptrSize]byte + db.mt.Put(entry.Key, + y.ValueStruct{ + Value: b.Ptrs[i].Encode(offsetBuf[:]), + Meta: entry.meta | bitValuePointer, + UserMeta: entry.UserMeta, + ExpiresAt: entry.ExpiresAt, + }) + } + } + return nil +} + +// writeRequests is called serially by only one goroutine. +func (db *DB) writeRequests(reqs []*request) error { + if len(reqs) == 0 { + return nil + } + + done := func(err error) { + for _, r := range reqs { + r.Err = err + r.Wg.Done() + } + } + db.elog.Printf("writeRequests called. Writing to value log") + + err := db.vlog.write(reqs) + if err != nil { + done(err) + return err + } + + db.elog.Printf("Writing to memtable") + var count int + for _, b := range reqs { + if len(b.Entries) == 0 { + continue + } + count += len(b.Entries) + var i uint64 + for err := db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() { + i++ + if i%100 == 0 { + db.elog.Printf("Making room for writes") + } + // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm. + // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm, + // you will get a deadlock. + time.Sleep(10 * time.Millisecond) + } + if err != nil { + done(err) + return errors.Wrap(err, "writeRequests") + } + if err := db.writeToLSM(b); err != nil { + done(err) + return errors.Wrap(err, "writeRequests") + } + db.updateHead(b.Ptrs) + } + done(nil) + db.elog.Printf("%d entries written", count) + return nil +} + +func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) { + if atomic.LoadInt32(&db.blockWrites) == 1 { + return nil, ErrBlockedWrites + } + var count, size int64 + for _, e := range entries { + size += int64(e.estimateSize(db.opt.ValueThreshold)) + count++ + } + if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize { + return nil, ErrTxnTooBig + } + + // We can only service one request because we need each txn to be stored in a contigous section. + // Txns should not interleave among other txns or rewrites. + req := requestPool.Get().(*request) + req.Entries = entries + req.Wg = sync.WaitGroup{} + req.Wg.Add(1) + db.writeCh <- req // Handled in doWrites. 
+ y.NumPuts.Add(int64(len(entries))) + + return req, nil +} + +func (db *DB) doWrites(lc *y.Closer) { + defer lc.Done() + pendingCh := make(chan struct{}, 1) + + writeRequests := func(reqs []*request) { + if err := db.writeRequests(reqs); err != nil { + db.opt.Errorf("writeRequests: %v", err) + } + <-pendingCh + } + + // This variable tracks the number of pending writes. + reqLen := new(expvar.Int) + y.PendingWrites.Set(db.opt.Dir, reqLen) + + reqs := make([]*request, 0, 10) + for { + var r *request + select { + case r = <-db.writeCh: + case <-lc.HasBeenClosed(): + goto closedCase + } + + for { + reqs = append(reqs, r) + reqLen.Set(int64(len(reqs))) + + if len(reqs) >= 3*kvWriteChCapacity { + pendingCh <- struct{}{} // blocking. + goto writeCase + } + + select { + // Either push to pending, or continue to pick from writeCh. + case r = <-db.writeCh: + case pendingCh <- struct{}{}: + goto writeCase + case <-lc.HasBeenClosed(): + goto closedCase + } + } + + closedCase: + close(db.writeCh) + for r := range db.writeCh { // Flush the channel. + reqs = append(reqs, r) + } + + pendingCh <- struct{}{} // Push to pending before doing a write. + writeRequests(reqs) + return + + writeCase: + go writeRequests(reqs) + reqs = make([]*request, 0, 10) + reqLen.Set(0) + } +} + +// batchSet applies a list of badger.Entry. If a request level error occurs it +// will be returned. +// Check(kv.BatchSet(entries)) +func (db *DB) batchSet(entries []*Entry) error { + req, err := db.sendToWriteCh(entries) + if err != nil { + return err + } + + return req.Wait() +} + +// batchSetAsync is the asynchronous version of batchSet. It accepts a callback +// function which is called when all the sets are complete. If a request level +// error occurs, it will be passed back via the callback. +// err := kv.BatchSetAsync(entries, func(err error)) { +// Check(err) +// } +func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error { + req, err := db.sendToWriteCh(entries) + if err != nil { + return err + } + go func() { + err := req.Wait() + // Write is complete. Let's call the callback function now. + f(err) + }() + return nil +} + +var errNoRoom = errors.New("No room for write") + +// ensureRoomForWrite is always called serially. +func (db *DB) ensureRoomForWrite() error { + var err error + db.Lock() + defer db.Unlock() + if db.mt.MemSize() < db.opt.MaxTableSize { + return nil + } + + y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed. + select { + case db.flushChan <- flushTask{db.mt, db.vhead}: + db.elog.Printf("Flushing value log to disk if async mode.") + // Ensure value log is synced to disk so this memtable's contents wouldn't be lost. + err = db.vlog.sync() + if err != nil { + return err + } + + db.elog.Printf("Flushing memtable, mt.size=%d size of flushChan: %d\n", + db.mt.MemSize(), len(db.flushChan)) + // We manage to push this task. Let's modify imm. + db.imm = append(db.imm, db.mt) + db.mt = skl.NewSkiplist(arenaSize(db.opt)) + // New memtable is empty. We certainly have room. + return nil + default: + // We need to do this to unlock and allow the flusher to modify imm. + return errNoRoom + } +} + +func arenaSize(opt Options) int64 { + return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize) +} + +// WriteLevel0Table flushes memtable. 
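+// It walks the skiplist in key order, adds every entry to a table.TableBuilder,
+// and writes the finished SSTable bytes to f in a single Write call.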
+func writeLevel0Table(s *skl.Skiplist, f *os.File) error { + iter := s.NewIterator() + defer iter.Close() + b := table.NewTableBuilder() + defer b.Close() + for iter.SeekToFirst(); iter.Valid(); iter.Next() { + if err := b.Add(iter.Key(), iter.Value()); err != nil { + return err + } + } + _, err := f.Write(b.Finish()) + return err +} + +type flushTask struct { + mt *skl.Skiplist + vptr valuePointer +} + +// handleFlushTask must be run serially. +func (db *DB) handleFlushTask(ft flushTask) error { + if !ft.mt.Empty() { + // Store badger head even if vptr is zero, need it for readTs + db.opt.Infof("Storing value log head: %+v\n", ft.vptr) + db.elog.Printf("Storing offset: %+v\n", ft.vptr) + offset := make([]byte, vptrSize) + ft.vptr.Encode(offset) + + // Pick the max commit ts, so in case of crash, our read ts would be higher than all the + // commits. + headTs := y.KeyWithTs(head, db.orc.nextTs()) + ft.mt.Put(headTs, y.ValueStruct{Value: offset}) + } + + fileID := db.lc.reserveFileID() + fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true) + if err != nil { + return y.Wrap(err) + } + + // Don't block just to sync the directory entry. + dirSyncCh := make(chan error) + go func() { dirSyncCh <- syncDir(db.opt.Dir) }() + + err = writeLevel0Table(ft.mt, fd) + dirSyncErr := <-dirSyncCh + + if err != nil { + db.elog.Errorf("ERROR while writing to level 0: %v", err) + return err + } + if dirSyncErr != nil { + // Do dir sync as best effort. No need to return due to an error there. + db.elog.Errorf("ERROR while syncing level directory: %v", dirSyncErr) + } + + tbl, err := table.OpenTable(fd, db.opt.TableLoadingMode, nil) + if err != nil { + db.elog.Printf("ERROR while opening table: %v", err) + return err + } + // We own a ref on tbl. + err = db.lc.addLevel0Table(tbl) // This will incrRef (if we don't error, sure) + tbl.DecrRef() // Releases our ref. + if err != nil { + return err + } + + // Update s.imm. Need a lock. + db.Lock() + defer db.Unlock() + // This is a single-threaded operation. ft.mt corresponds to the head of + // db.imm list. Once we flush it, we advance db.imm. The next ft.mt + // which would arrive here would match db.imm[0], because we acquire a + // lock over DB when pushing to flushChan. + // TODO: This logic is dirty AF. Any change and this could easily break. + y.AssertTrue(ft.mt == db.imm[0]) + db.imm = db.imm[1:] + ft.mt.DecrRef() // Return memory. + return nil +} + +// flushMemtable must keep running until we send it an empty flushTask. If there +// are errors during handling the flush task, we'll retry indefinitely. +func (db *DB) flushMemtable(lc *y.Closer) error { + defer lc.Done() + + for ft := range db.flushChan { + if ft.mt == nil { + // We close db.flushChan now, instead of sending a nil ft.mt. + continue + } + for { + err := db.handleFlushTask(ft) + if err == nil { + break + } + // Encountered error. Retry indefinitely. + db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err) + time.Sleep(time.Second) + } + } + return nil +} + +func exists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return true, err +} + +// This function does a filewalk, calculates the size of vlog and sst files and stores it in +// y.LSMSize and y.VlogSize. 
+func (db *DB) calculateSize() { + newInt := func(val int64) *expvar.Int { + v := new(expvar.Int) + v.Add(val) + return v + } + + totalSize := func(dir string) (int64, int64) { + var lsmSize, vlogSize int64 + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + ext := filepath.Ext(path) + if ext == ".sst" { + lsmSize += info.Size() + } else if ext == ".vlog" { + vlogSize += info.Size() + } + return nil + }) + if err != nil { + db.elog.Printf("Got error while calculating total size of directory: %s", dir) + } + return lsmSize, vlogSize + } + + lsmSize, vlogSize := totalSize(db.opt.Dir) + y.LSMSize.Set(db.opt.Dir, newInt(lsmSize)) + // If valueDir is different from dir, we'd have to do another walk. + if db.opt.ValueDir != db.opt.Dir { + _, vlogSize = totalSize(db.opt.ValueDir) + } + y.VlogSize.Set(db.opt.Dir, newInt(vlogSize)) +} + +func (db *DB) updateSize(lc *y.Closer) { + defer lc.Done() + + metricsTicker := time.NewTicker(time.Minute) + defer metricsTicker.Stop() + + for { + select { + case <-metricsTicker.C: + db.calculateSize() + case <-lc.HasBeenClosed(): + return + } + } +} + +// RunValueLogGC triggers a value log garbage collection. +// +// It picks value log files to perform GC based on statistics that are collected +// duing compactions. If no such statistics are available, then log files are +// picked in random order. The process stops as soon as the first log file is +// encountered which does not result in garbage collection. +// +// When a log file is picked, it is first sampled. If the sample shows that we +// can discard at least discardRatio space of that file, it would be rewritten. +// +// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is +// thrown indicating that the call resulted in no file rewrites. +// +// We recommend setting discardRatio to 0.5, thus indicating that a file be +// rewritten if half the space can be discarded. This results in a lifetime +// value log write amplification of 2 (1 from original write + 0.5 rewrite + +// 0.25 + 0.125 + ... = 2). Setting it to higher value would result in fewer +// space reclaims, while setting it to a lower value would result in more space +// reclaims at the cost of increased activity on the LSM tree. discardRatio +// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an +// ErrInvalidRequest is returned. +// +// Only one GC is allowed at a time. If another value log GC is running, or DB +// has been closed, this would return an ErrRejected. +// +// Note: Every time GC is run, it would produce a spike of activity on the LSM +// tree. +func (db *DB) RunValueLogGC(discardRatio float64) error { + if discardRatio >= 1.0 || discardRatio <= 0.0 { + return ErrInvalidRequest + } + + // Find head on disk + headKey := y.KeyWithTs(head, math.MaxUint64) + // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key + val, err := db.lc.get(headKey, nil) + if err != nil { + return errors.Wrap(err, "Retrieving head from on-disk LSM") + } + + var head valuePointer + if len(val.Value) > 0 { + head.Decode(val.Value) + } + + // Pick a log file and run GC + return db.vlog.runGC(discardRatio, head) +} + +// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to +// call RunValueLogGC. 
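// Illustrative sketch, not part of the vendored Badger source: a background loop
// driving RunValueLogGC the way its doc comment suggests, using Size (declared just
// below) to skip GC while the value log is small. The ticker interval and the 1 GB
// threshold are arbitrary assumptions for the sketch; "time" is assumed imported.
func exampleValueLogGC(db *DB, stop <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			if _, vlogSize := db.Size(); vlogSize < 1<<30 {
				continue // Too little value log data for GC to be worthwhile.
			}
			// Keep collecting until Badger reports nothing more to rewrite (ErrNoRewrite).
			for db.RunValueLogGC(0.5) == nil {
			}
		}
	}
}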
+func (db *DB) Size() (lsm int64, vlog int64) { + if y.LSMSize.Get(db.opt.Dir) == nil { + lsm, vlog = 0, 0 + return + } + lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value() + vlog = y.VlogSize.Get(db.opt.Dir).(*expvar.Int).Value() + return +} + +// Sequence represents a Badger sequence. +type Sequence struct { + sync.Mutex + db *DB + key []byte + next uint64 + leased uint64 + bandwidth uint64 +} + +// Next would return the next integer in the sequence, updating the lease by running a transaction +// if needed. +func (seq *Sequence) Next() (uint64, error) { + seq.Lock() + defer seq.Unlock() + if seq.next >= seq.leased { + if err := seq.updateLease(); err != nil { + return 0, err + } + } + val := seq.next + seq.next++ + return val, nil +} + +// Release the leased sequence to avoid wasted integers. This should be done right +// before closing the associated DB. However it is valid to use the sequence after +// it was released, causing a new lease with full bandwidth. +func (seq *Sequence) Release() error { + seq.Lock() + defer seq.Unlock() + err := seq.db.Update(func(txn *Txn) error { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], seq.next) + return txn.Set(seq.key, buf[:]) + }) + if err != nil { + return err + } + seq.leased = seq.next + return nil +} + +func (seq *Sequence) updateLease() error { + return seq.db.Update(func(txn *Txn) error { + item, err := txn.Get(seq.key) + if err == ErrKeyNotFound { + seq.next = 0 + } else if err != nil { + return err + } else { + var num uint64 + if err := item.Value(func(v []byte) error { + num = binary.BigEndian.Uint64(v) + return nil + }); err != nil { + return err + } + seq.next = num + } + + lease := seq.next + seq.bandwidth + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], lease) + if err = txn.Set(seq.key, buf[:]); err != nil { + return err + } + seq.leased = lease + return nil + }) +} + +// GetSequence would initiate a new sequence object, generating it from the stored lease, if +// available, in the database. Sequence can be used to get a list of monotonically increasing +// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the +// size of the lease, determining how many Next() requests can be served from memory. +// +// GetSequence is not supported on ManagedDB. Calling this would result in a panic. +func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) { + if db.opt.managedTxns { + panic("Cannot use GetSequence with managedDB=true.") + } + + switch { + case len(key) == 0: + return nil, ErrEmptyKey + case bandwidth == 0: + return nil, ErrZeroBandwidth + } + seq := &Sequence{ + db: db, + key: key, + next: 0, + leased: 0, + bandwidth: bandwidth, + } + err := seq.updateLease() + return seq, err +} + +// Tables gets the TableInfo objects from the level controller. +func (db *DB) Tables() []TableInfo { + return db.lc.getTableInfo() +} + +// KeySplits can be used to get rough key ranges to divide up iteration over +// the DB. +func (db *DB) KeySplits(prefix []byte) []string { + var splits []string + for _, ti := range db.Tables() { + // We don't use ti.Left, because that has a tendency to store !badger + // keys. 
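// Illustrative sketch, not part of the vendored Badger source: typical use of the
// Sequence type defined above. The key and bandwidth are placeholder values, and
// Release is deferred so unused integers in the lease are handed back, as its doc
// comment recommends.
func exampleSequence(db *DB) error {
	seq, err := db.GetSequence([]byte("user-ids"), 1000)
	if err != nil {
		return err
	}
	defer seq.Release() // Return the unused part of the lease before closing the DB.
	for i := 0; i < 3; i++ {
		id, err := seq.Next()
		if err != nil {
			return err
		}
		_ = id // 0, 1, 2, ... served from memory until the lease is exhausted.
	}
	return nil
}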
+ if bytes.HasPrefix(ti.Right, prefix) { + splits = append(splits, string(ti.Right)) + } + } + sort.Strings(splits) + return splits +} + +// MaxBatchCount returns max possible entries in batch +func (db *DB) MaxBatchCount() int64 { + return db.opt.maxBatchCount +} + +// MaxBatchSize returns max possible batch size +func (db *DB) MaxBatchSize() int64 { + return db.opt.maxBatchSize +} + +func (db *DB) stopCompactions() { + // Stop memtable flushes. + if db.closers.memtable != nil { + close(db.flushChan) + db.closers.memtable.SignalAndWait() + } + // Stop compactions. + if db.closers.compactors != nil { + db.closers.compactors.SignalAndWait() + } +} + +func (db *DB) startCompactions() { + // Resume compactions. + if db.closers.compactors != nil { + db.closers.compactors = y.NewCloser(1) + db.lc.startCompact(db.closers.compactors) + } + if db.closers.memtable != nil { + db.flushChan = make(chan flushTask, db.opt.NumMemtables) + db.closers.memtable = y.NewCloser(1) + go db.flushMemtable(db.closers.memtable) + } +} + +// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same +// level. This ensures that all the versions of keys are colocated and not split across multiple +// levels, which is necessary after a restore from backup. During Flatten, live compactions are +// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition +// between flattening the tree and new tables being created at level zero. +func (db *DB) Flatten(workers int) error { + db.stopCompactions() + defer db.startCompactions() + + compactAway := func(cp compactionPriority) error { + db.opt.Infof("Attempting to compact with %+v\n", cp) + errCh := make(chan error, 1) + for i := 0; i < workers; i++ { + go func() { + errCh <- db.lc.doCompact(cp) + }() + } + var success int + var rerr error + for i := 0; i < workers; i++ { + err := <-errCh + if err != nil { + rerr = err + db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err) + } else { + success++ + } + } + if success == 0 { + return rerr + } + // We could do at least one successful compaction. So, we'll consider this a success. + db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n", + success, cp.level) + return nil + } + + hbytes := func(sz int64) string { + return humanize.Bytes(uint64(sz)) + } + + for { + db.opt.Infof("\n") + var levels []int + for i, l := range db.lc.levels { + sz := l.getTotalSize() + db.opt.Infof("Level: %d. %8s Size. %8s Max.\n", + i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize)) + if sz > 0 { + levels = append(levels, i) + } + } + if len(levels) <= 1 { + prios := db.lc.pickCompactLevels() + if len(prios) == 0 || prios[0].score <= 1.0 { + db.opt.Infof("All tables consolidated into one level. Flattening done.\n") + return nil + } + if err := compactAway(prios[0]); err != nil { + return err + } + continue + } + // Create an artificial compaction priority, to ensure that we compact the level. + cp := compactionPriority{level: levels[0], score: 1.71} + if err := compactAway(cp); err != nil { + return err + } + } +} + +// DropAll would drop all the data stored in Badger. It does this in the following way. +// - Stop accepting new writes. +// - Pause memtable flushes and compactions. +// - Pick all tables from all levels, create a changeset to delete all these +// tables and apply it to manifest. +// - Pick all log files from value log, and delete all of them. Restart value log files from zero. 
+// - Resume memtable flushes and compactions. +// +// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do +// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and +// writes are paused before running DropAll, and resumed after it is finished. +func (db *DB) DropAll() error { + if db.opt.ReadOnly { + panic("Attempting to drop data in read-only mode.") + } + db.opt.Infof("DropAll called. Blocking writes...") + // Stop accepting new writes. + atomic.StoreInt32(&db.blockWrites, 1) + + // Make all pending writes finish. The following will also close writeCh. + db.closers.writes.SignalAndWait() + db.opt.Infof("Writes flushed. Stopping compactions now...") + + // Stop all compactions. + db.stopCompactions() + defer func() { + db.opt.Infof("Resuming writes") + db.startCompactions() + + db.writeCh = make(chan *request, kvWriteChCapacity) + db.closers.writes = y.NewCloser(1) + go db.doWrites(db.closers.writes) + + // Resume writes. + atomic.StoreInt32(&db.blockWrites, 0) + }() + db.opt.Infof("Compactions stopped. Dropping all SSTables...") + + // Block all foreign interactions with memory tables. + db.Lock() + defer db.Unlock() + + // Remove inmemory tables. Calling DecrRef for safety. Not sure if they're absolutely needed. + db.mt.DecrRef() + db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes. + for _, mt := range db.imm { + mt.DecrRef() + } + db.imm = db.imm[:0] + + num, err := db.lc.deleteLSMTree() + if err != nil { + return err + } + db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num) + + num, err = db.vlog.dropAll() + if err != nil { + return err + } + db.vhead = valuePointer{} // Zero it out. + db.opt.Infof("Deleted %d value log files. DropAll done.\n", num) + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/db2_test.go b/vendor/github.com/dgraph-io/badger/db2_test.go new file mode 100644 index 00000000..03a7b265 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/db2_test.go @@ -0,0 +1,325 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "math/rand" + "os" + "path" + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTruncateVlogWithClose(t *testing.T) { + key := func(i int) []byte { + return []byte(fmt.Sprintf("%d%10d", i, i)) + } + data := func(l int) []byte { + m := make([]byte, l) + _, err := rand.Read(m) + require.NoError(t, err) + return m + } + + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := getTestOptions(dir) + opt.SyncWrites = true + opt.Truncate = true + opt.ValueThreshold = 1 // Force all reads from value log. + + db, err := Open(opt) + require.NoError(t, err) + + err = db.Update(func(txn *Txn) error { + return txn.Set(key(0), data(4055)) + }) + require.NoError(t, err) + + // Close the DB. 
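// Illustrative sketch, not part of the vendored Badger source, and referring to
// DropAll and Flatten defined in db.go above rather than to this test file: both
// assume the application has quiesced its own reads and writes first, as their doc
// comments request. The worker count passed to Flatten is an arbitrary choice.
func exampleMaintenance(db *DB, wipe bool) error {
	if wipe {
		return db.DropAll() // Deletes every SSTable and value log file, then resumes writes.
	}
	return db.Flatten(4) // e.g. after a restore from backup, compact all tables onto one level.
}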
+ require.NoError(t, db.Close()) + require.NoError(t, os.Truncate(path.Join(dir, "000000.vlog"), 4096)) + + // Reopen and write some new data. + db, err = Open(opt) + require.NoError(t, err) + for i := 0; i < 32; i++ { + err := db.Update(func(txn *Txn) error { + return txn.Set(key(i), data(10)) + }) + require.NoError(t, err) + } + // Read it back to ensure that we can read it now. + for i := 0; i < 32; i++ { + err := db.View(func(txn *Txn) error { + item, err := txn.Get(key(i)) + require.NoError(t, err) + val := getItemValue(t, item) + require.Equal(t, 10, len(val)) + return nil + }) + require.NoError(t, err) + } + require.NoError(t, db.Close()) + + // Reopen and read the data again. + db, err = Open(opt) + require.NoError(t, err) + for i := 0; i < 32; i++ { + err := db.View(func(txn *Txn) error { + item, err := txn.Get(key(i)) + require.NoError(t, err) + val := getItemValue(t, item) + require.Equal(t, 10, len(val)) + return nil + }) + require.NoError(t, err) + } + require.NoError(t, db.Close()) +} + +var manual = flag.Bool("manual", false, "Set when manually running some tests.") + +// The following 3 TruncateVlogNoClose tests should be run one after another. +// None of these close the DB, simulating a crash. They should be run with a +// script, which truncates the value log to 4096, lining up with the end of the +// first entry in the txn. At <4096, it would cause the entry to be truncated +// immediately, at >4096, same thing. +func TestTruncateVlogNoClose(t *testing.T) { + if !*manual { + t.Skip("Skipping test meant to be run manually.") + return + } + fmt.Println("running") + dir := "p" + opts := getTestOptions(dir) + opts.SyncWrites = true + opts.Truncate = true + + kv, err := Open(opts) + require.NoError(t, err) + key := func(i int) string { + return fmt.Sprintf("%d%10d", i, i) + } + data := fmt.Sprintf("%4055d", 1) + err = kv.Update(func(txn *Txn) error { + return txn.Set([]byte(key(0)), []byte(data)) + }) + require.NoError(t, err) +} +func TestTruncateVlogNoClose2(t *testing.T) { + if !*manual { + t.Skip("Skipping test meant to be run manually.") + return + } + dir := "p" + opts := getTestOptions(dir) + opts.SyncWrites = true + opts.Truncate = true + + kv, err := Open(opts) + require.NoError(t, err) + key := func(i int) string { + return fmt.Sprintf("%d%10d", i, i) + } + data := fmt.Sprintf("%10d", 1) + for i := 32; i < 64; i++ { + err := kv.Update(func(txn *Txn) error { + return txn.Set([]byte(key(i)), []byte(data)) + }) + require.NoError(t, err) + } + for i := 32; i < 64; i++ { + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get([]byte(key(i))) + require.NoError(t, err) + val := getItemValue(t, item) + require.NotNil(t, val) + require.True(t, len(val) > 0) + return nil + })) + } +} +func TestTruncateVlogNoClose3(t *testing.T) { + if !*manual { + t.Skip("Skipping test meant to be run manually.") + return + } + fmt.Print("Running") + dir := "p" + opts := getTestOptions(dir) + opts.SyncWrites = true + opts.Truncate = true + + kv, err := Open(opts) + require.NoError(t, err) + key := func(i int) string { + return fmt.Sprintf("%d%10d", i, i) + } + for i := 32; i < 64; i++ { + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get([]byte(key(i))) + require.NoError(t, err) + val := getItemValue(t, item) + require.NotNil(t, val) + require.True(t, len(val) > 0) + return nil + })) + } +} + +func TestBigKeyValuePairs(t *testing.T) { + // This test takes too much memory. So, run separately. 
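// Note on the manual tests above and below (a usage assumption, based on the
// -manual flag declared earlier in this file): they are skipped unless that flag is
// passed to the test binary, e.g.
//
//	go test -run 'TestTruncateVlogNoClose' -manual
//
// with the external truncation script described in the comment run between the
// three TruncateVlogNoClose stages.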
+ if !*manual { + t.Skip("Skipping test meant to be run manually.") + return + } + opts := DefaultOptions + opts.MaxTableSize = 1 << 20 + opts.ValueLogMaxEntries = 64 + runBadgerTest(t, &opts, func(t *testing.T, db *DB) { + bigK := make([]byte, 65001) + bigV := make([]byte, db.opt.ValueLogFileSize+1) + small := make([]byte, 65000) + + txn := db.NewTransaction(true) + require.Regexp(t, regexp.MustCompile("Key.*exceeded"), txn.Set(bigK, small)) + require.Regexp(t, regexp.MustCompile("Value.*exceeded"), txn.Set(small, bigV)) + + require.NoError(t, txn.Set(small, small)) + require.Regexp(t, regexp.MustCompile("Key.*exceeded"), txn.Set(bigK, bigV)) + + require.NoError(t, db.View(func(txn *Txn) error { + _, err := txn.Get(small) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) + + // Now run a longer test, which involves value log GC. + data := fmt.Sprintf("%100d", 1) + key := func(i int) string { + return fmt.Sprintf("%65000d", i) + } + + saveByKey := func(key string, value []byte) error { + return db.Update(func(txn *Txn) error { + return txn.Set([]byte(key), value) + }) + } + + getByKey := func(key string) error { + return db.View(func(txn *Txn) error { + item, err := txn.Get([]byte(key)) + if err != nil { + return err + } + return item.Value(func(val []byte) error { + if len(val) == 0 { + log.Fatalf("key not found %q", len(key)) + } + return nil + }) + }) + } + + for i := 0; i < 32; i++ { + if i < 30 { + require.NoError(t, saveByKey(key(i), []byte(data))) + } else { + require.NoError(t, saveByKey(key(i), []byte(fmt.Sprintf("%100d", i)))) + } + } + + for j := 0; j < 5; j++ { + for i := 0; i < 32; i++ { + if i < 30 { + require.NoError(t, saveByKey(key(i), []byte(data))) + } else { + require.NoError(t, saveByKey(key(i), []byte(fmt.Sprintf("%100d", i)))) + } + } + } + + for i := 0; i < 32; i++ { + require.NoError(t, getByKey(key(i))) + } + + var loops int + var err error + for err == nil { + err = db.RunValueLogGC(0.5) + require.NotRegexp(t, regexp.MustCompile("truncate"), err) + loops++ + } + t.Logf("Ran value log GC %d times. Last error: %v\n", loops, err) + }) +} + +// The following test checks for issue #585. +func TestPushValueLogLimit(t *testing.T) { + // This test takes too much memory. So, run separately. + if !*manual { + t.Skip("Skipping test meant to be run manually.") + return + } + opt := DefaultOptions + opt.ValueLogMaxEntries = 64 + opt.ValueLogFileSize = 2 << 30 + runBadgerTest(t, &opt, func(t *testing.T, db *DB) { + data := []byte(fmt.Sprintf("%30d", 1)) + key := func(i int) string { + return fmt.Sprintf("%100d", i) + } + + for i := 0; i < 32; i++ { + if i == 4 { + v := make([]byte, 2<<30) + err := db.Update(func(txn *Txn) error { + return txn.Set([]byte(key(i)), v) + }) + require.NoError(t, err) + } else { + err := db.Update(func(txn *Txn) error { + return txn.Set([]byte(key(i)), data) + }) + require.NoError(t, err) + } + } + + for i := 0; i < 32; i++ { + err := db.View(func(txn *Txn) error { + item, err := txn.Get([]byte(key(i))) + require.NoError(t, err, "Getting key: %s", key(i)) + err = item.Value(func(v []byte) error { + _ = v + return nil + }) + require.NoError(t, err, "Getting value: %s", key(i)) + return nil + }) + require.NoError(t, err) + } + }) +} diff --git a/vendor/github.com/dgraph-io/badger/db_test.go b/vendor/github.com/dgraph-io/badger/db_test.go new file mode 100644 index 00000000..4d7c05bf --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/db_test.go @@ -0,0 +1,1708 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. 
and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "os" + "path/filepath" + "runtime" + "sort" + "sync" + "testing" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + + "github.com/stretchr/testify/require" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +var mmap = flag.Bool("vlog_mmap", true, "Specify if value log must be memory-mapped") + +func getTestOptions(dir string) Options { + opt := DefaultOptions + opt.MaxTableSize = 1 << 15 // Force more compaction. + opt.LevelOneSize = 4 << 15 // Force more compaction. + opt.Dir = dir + opt.ValueDir = dir + opt.SyncWrites = false + if !*mmap { + opt.ValueLogLoadingMode = options.FileIO + } + return opt +} + +func getItemValue(t *testing.T, item *Item) (val []byte) { + t.Helper() + var v []byte + size := item.ValueSize() + err := item.Value(func(val []byte) error { + if val == nil { + v = nil + } else { + v = append([]byte{}, val...) + } + return nil + }) + if err != nil { + t.Error(err) + } + if int64(len(v)) != size { + t.Errorf("incorrect size: expected %d, got %d", len(v), size) + } + if v == nil { + return nil + } + another, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, v, another) + return v +} + +func txnSet(t *testing.T, kv *DB, key []byte, val []byte, meta byte) { + txn := kv.NewTransaction(true) + require.NoError(t, txn.SetWithMeta(key, val, meta)) + require.NoError(t, txn.Commit()) +} + +func txnDelete(t *testing.T, kv *DB, key []byte) { + txn := kv.NewTransaction(true) + require.NoError(t, txn.Delete(key)) + require.NoError(t, txn.Commit()) +} + +// Opens a badger db and runs a a test on it. 
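// Illustrative sketch, not part of the vendored Badger source: the plain read path
// that the getItemValue helper above exercises, written the way application code
// would call the public API. The function name is a placeholder.
func exampleRead(db *DB, key []byte) ([]byte, error) {
	var out []byte
	err := db.View(func(txn *Txn) error {
		item, err := txn.Get(key) // returns ErrKeyNotFound if the key is absent
		if err != nil {
			return err
		}
		// ValueCopy allocates its own buffer, so out stays valid after the txn ends.
		out, err = item.ValueCopy(nil)
		return err
	})
	return out, err
}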
+func runBadgerTest(t *testing.T, opts *Options, test func(t *testing.T, db *DB)) { + dir, err := ioutil.TempDir(".", "badger-test") + require.NoError(t, err) + defer os.RemoveAll(dir) + if opts == nil { + opts = new(Options) + *opts = getTestOptions(dir) + } else { + opts.Dir = dir + opts.ValueDir = dir + } + db, err := Open(*opts) + require.NoError(t, err) + defer db.Close() + test(t, db) +} + +func TestWrite(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + for i := 0; i < 100; i++ { + txnSet(t, db, []byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("val%d", i)), 0x00) + } + }) +} + +func TestUpdateAndView(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + err := db.Update(func(txn *Txn) error { + for i := 0; i < 10; i++ { + err := txn.Set([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("val%d", i))) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + err = db.View(func(txn *Txn) error { + for i := 0; i < 10; i++ { + item, err := txn.Get([]byte(fmt.Sprintf("key%d", i))) + if err != nil { + return err + } + + expected := []byte(fmt.Sprintf("val%d", i)) + if err := item.Value(func(val []byte) error { + require.Equal(t, expected, val, + "Invalid value for key %q. expected: %q, actual: %q", + item.Key(), expected, val) + return nil + }); err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + }) +} + +func TestConcurrentWrite(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Not a benchmark. Just a simple test for concurrent writes. + n := 20 + m := 500 + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + for j := 0; j < m; j++ { + txnSet(t, db, []byte(fmt.Sprintf("k%05d_%08d", i, j)), + []byte(fmt.Sprintf("v%05d_%08d", i, j)), byte(j%127)) + } + }(i) + } + wg.Wait() + + t.Log("Starting iteration") + + opt := IteratorOptions{} + opt.Reverse = false + opt.PrefetchSize = 10 + opt.PrefetchValues = true + + txn := db.NewTransaction(true) + it := txn.NewIterator(opt) + defer it.Close() + var i, j int + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + k := item.Key() + if k == nil { + break // end of iteration. 
+ } + + require.EqualValues(t, fmt.Sprintf("k%05d_%08d", i, j), string(k)) + v := getItemValue(t, item) + require.EqualValues(t, fmt.Sprintf("v%05d_%08d", i, j), string(v)) + require.Equal(t, item.UserMeta(), byte(j%127)) + j++ + if j == m { + i++ + j = 0 + } + } + require.EqualValues(t, n, i) + require.EqualValues(t, 0, j) + }) +} + +func TestGet(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + txnSet(t, db, []byte("key1"), []byte("val1"), 0x08) + + txn := db.NewTransaction(false) + item, err := txn.Get([]byte("key1")) + require.NoError(t, err) + require.EqualValues(t, "val1", getItemValue(t, item)) + require.Equal(t, byte(0x08), item.UserMeta()) + txn.Discard() + + txnSet(t, db, []byte("key1"), []byte("val2"), 0x09) + + txn = db.NewTransaction(false) + item, err = txn.Get([]byte("key1")) + require.NoError(t, err) + require.EqualValues(t, "val2", getItemValue(t, item)) + require.Equal(t, byte(0x09), item.UserMeta()) + txn.Discard() + + txnDelete(t, db, []byte("key1")) + + txn = db.NewTransaction(false) + _, err = txn.Get([]byte("key1")) + require.Equal(t, ErrKeyNotFound, err) + txn.Discard() + + txnSet(t, db, []byte("key1"), []byte("val3"), 0x01) + + txn = db.NewTransaction(false) + item, err = txn.Get([]byte("key1")) + require.NoError(t, err) + require.EqualValues(t, "val3", getItemValue(t, item)) + require.Equal(t, byte(0x01), item.UserMeta()) + + longVal := make([]byte, 1000) + txnSet(t, db, []byte("key1"), longVal, 0x00) + + txn = db.NewTransaction(false) + item, err = txn.Get([]byte("key1")) + require.NoError(t, err) + require.EqualValues(t, longVal, getItemValue(t, item)) + txn.Discard() + }) +} + +func TestGetAfterDelete(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // populate with one entry + key := []byte("key") + txnSet(t, db, key, []byte("val1"), 0x00) + require.NoError(t, db.Update(func(txn *Txn) error { + err := txn.Delete(key) + require.NoError(t, err) + + _, err = txn.Get(key) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) + }) +} + +func TestTxnTooBig(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + data := func(i int) []byte { + return []byte(fmt.Sprintf("%b", i)) + } + // n := 500000 + n := 1000 + txn := db.NewTransaction(true) + for i := 0; i < n; { + if err := txn.Set(data(i), data(i)); err != nil { + require.NoError(t, txn.Commit()) + txn = db.NewTransaction(true) + } else { + i++ + } + } + require.NoError(t, txn.Commit()) + + txn = db.NewTransaction(true) + for i := 0; i < n; { + if err := txn.Delete(data(i)); err != nil { + require.NoError(t, txn.Commit()) + txn = db.NewTransaction(true) + } else { + i++ + } + } + require.NoError(t, txn.Commit()) + }) +} + +func TestForceCompactL0(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opts := getTestOptions(dir) + opts.ValueLogFileSize = 15 << 20 + opts.managedTxns = true + db, err := Open(opts) + require.NoError(t, err) + + data := func(i int) []byte { + return []byte(fmt.Sprintf("%b", i)) + } + n := 80 + m := 45 // Increasing would cause ErrTxnTooBig + sz := 32 << 10 + v := make([]byte, sz) + for i := 0; i < n; i += 2 { + version := uint64(i) + txn := db.NewTransactionAt(version, true) + for j := 0; j < m; j++ { + require.NoError(t, txn.Set(data(j), v)) + } + require.NoError(t, txn.CommitAt(version+1, nil)) + } + db.Close() + + opts.managedTxns = true + db, err = Open(opts) + require.NoError(t, err) + require.Equal(t, len(db.lc.levels[0].tables), 0) + require.NoError(t, 
db.Close()) +} + +// Put a lot of data to move some data to disk. +// WARNING: This test might take a while but it should pass! +func TestGetMore(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + + data := func(i int) []byte { + return []byte(fmt.Sprintf("%b", i)) + } + // n := 500000 + n := 10000 + m := 45 // Increasing would cause ErrTxnTooBig + for i := 0; i < n; i += m { + txn := db.NewTransaction(true) + for j := i; j < i+m && j < n; j++ { + require.NoError(t, txn.Set(data(j), data(j))) + } + require.NoError(t, txn.Commit()) + } + require.NoError(t, db.validate()) + + for i := 0; i < n; i++ { + txn := db.NewTransaction(false) + item, err := txn.Get(data(i)) + if err != nil { + t.Error(err) + } + require.EqualValues(t, string(data(i)), string(getItemValue(t, item))) + txn.Discard() + } + + // Overwrite + for i := 0; i < n; i += m { + txn := db.NewTransaction(true) + for j := i; j < i+m && j < n; j++ { + require.NoError(t, txn.Set(data(j), + // Use a long value that will certainly exceed value threshold. + []byte(fmt.Sprintf("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz%9d", j)))) + } + require.NoError(t, txn.Commit()) + } + require.NoError(t, db.validate()) + + for i := 0; i < n; i++ { + expectedValue := fmt.Sprintf("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz%9d", i) + k := data(i) + txn := db.NewTransaction(false) + item, err := txn.Get(k) + if err != nil { + t.Error(err) + } + got := string(getItemValue(t, item)) + if expectedValue != got { + + vs, err := db.get(y.KeyWithTs(k, math.MaxUint64)) + require.NoError(t, err) + fmt.Printf("wanted=%q Item: %s\n", k, item) + fmt.Printf("on re-run, got version: %+v\n", vs) + + txn := db.NewTransaction(false) + itr := txn.NewIterator(DefaultIteratorOptions) + for itr.Seek(k); itr.Valid(); itr.Next() { + item := itr.Item() + fmt.Printf("item=%s\n", item) + if !bytes.Equal(item.Key(), k) { + break + } + } + itr.Close() + txn.Discard() + } + require.EqualValues(t, expectedValue, string(getItemValue(t, item)), "wanted=%q Item: %s\n", k, item) + txn.Discard() + } + + // "Delete" key. + for i := 0; i < n; i += m { + if (i % 10000) == 0 { + fmt.Printf("Deleting i=%d\n", i) + } + txn := db.NewTransaction(true) + for j := i; j < i+m && j < n; j++ { + require.NoError(t, txn.Delete(data(j))) + } + require.NoError(t, txn.Commit()) + } + db.validate() + for i := 0; i < n; i++ { + if (i % 10000) == 0 { + // Display some progress. Right now, it's not very fast with no caching. + fmt.Printf("Testing i=%d\n", i) + } + k := data(i) + txn := db.NewTransaction(false) + _, err := txn.Get([]byte(k)) + require.Equal(t, ErrKeyNotFound, err, "should not have found k: %q", k) + txn.Discard() + } + }) +} + +// Put a lot of data to move some data to disk. +// WARNING: This test might take a while but it should pass! 
+func TestExistsMore(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // n := 500000 + n := 10000 + m := 45 + for i := 0; i < n; i += m { + if (i % 1000) == 0 { + t.Logf("Putting i=%d\n", i) + } + txn := db.NewTransaction(true) + for j := i; j < i+m && j < n; j++ { + require.NoError(t, txn.Set([]byte(fmt.Sprintf("%09d", j)), + []byte(fmt.Sprintf("%09d", j)))) + } + require.NoError(t, txn.Commit()) + } + db.validate() + + for i := 0; i < n; i++ { + if (i % 1000) == 0 { + fmt.Printf("Testing i=%d\n", i) + } + k := fmt.Sprintf("%09d", i) + require.NoError(t, db.View(func(txn *Txn) error { + _, err := txn.Get([]byte(k)) + require.NoError(t, err) + return nil + })) + } + require.NoError(t, db.View(func(txn *Txn) error { + _, err := txn.Get([]byte("non-exists")) + require.Error(t, err) + return nil + })) + + // "Delete" key. + for i := 0; i < n; i += m { + if (i % 1000) == 0 { + fmt.Printf("Deleting i=%d\n", i) + } + txn := db.NewTransaction(true) + for j := i; j < i+m && j < n; j++ { + require.NoError(t, txn.Delete([]byte(fmt.Sprintf("%09d", j)))) + } + require.NoError(t, txn.Commit()) + } + db.validate() + for i := 0; i < n; i++ { + if (i % 10000) == 0 { + // Display some progress. Right now, it's not very fast with no caching. + fmt.Printf("Testing i=%d\n", i) + } + k := fmt.Sprintf("%09d", i) + + require.NoError(t, db.View(func(txn *Txn) error { + _, err := txn.Get([]byte(k)) + require.Error(t, err) + return nil + })) + } + fmt.Println("Done and closing") + }) +} + +func TestIterate2Basic(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + + bkey := func(i int) []byte { + return []byte(fmt.Sprintf("%09d", i)) + } + bval := func(i int) []byte { + return []byte(fmt.Sprintf("%025d", i)) + } + + // n := 500000 + n := 10000 + for i := 0; i < n; i++ { + if (i % 1000) == 0 { + t.Logf("Put i=%d\n", i) + } + txnSet(t, db, bkey(i), bval(i), byte(i%127)) + } + + opt := IteratorOptions{} + opt.PrefetchValues = true + opt.PrefetchSize = 10 + + txn := db.NewTransaction(false) + it := txn.NewIterator(opt) + { + var count int + rewind := true + t.Log("Starting first basic iteration") + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + key := item.Key() + if rewind && count == 5000 { + // Rewind would skip /head/ key, and it.Next() would skip 0. 
+ count = 1 + it.Rewind() + t.Log("Rewinding from 5000 to zero.") + rewind = false + continue + } + require.EqualValues(t, bkey(count), string(key)) + val := getItemValue(t, item) + require.EqualValues(t, bval(count), string(val)) + require.Equal(t, byte(count%127), item.UserMeta()) + count++ + } + require.EqualValues(t, n, count) + } + + { + t.Log("Starting second basic iteration") + idx := 5030 + for it.Seek(bkey(idx)); it.Valid(); it.Next() { + item := it.Item() + require.EqualValues(t, bkey(idx), string(item.Key())) + require.EqualValues(t, bval(idx), string(getItemValue(t, item))) + idx++ + } + } + it.Close() + }) +} + +func TestLoad(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + n := 10000 + { + kv, err := Open(getTestOptions(dir)) + require.NoError(t, err) + for i := 0; i < n; i++ { + if (i % 10000) == 0 { + fmt.Printf("Putting i=%d\n", i) + } + k := []byte(fmt.Sprintf("%09d", i)) + txnSet(t, kv, k, k, 0x00) + } + kv.Close() + } + + kv, err := Open(getTestOptions(dir)) + require.NoError(t, err) + require.Equal(t, uint64(10001), kv.orc.readTs()) + + for i := 0; i < n; i++ { + if (i % 10000) == 0 { + fmt.Printf("Testing i=%d\n", i) + } + k := fmt.Sprintf("%09d", i) + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get([]byte(k)) + require.NoError(t, err) + require.EqualValues(t, k, string(getItemValue(t, item))) + return nil + })) + } + kv.Close() + summary := kv.lc.getSummary() + + // Check that files are garbage collected. + idMap := getIDMap(dir) + for fileID := range idMap { + // Check that name is in summary.filenames. + require.True(t, summary.fileIDs[fileID], "%d", fileID) + } + require.EqualValues(t, len(idMap), len(summary.fileIDs)) + + var fileIDs []uint64 + for k := range summary.fileIDs { // Map to array. 
+ fileIDs = append(fileIDs, k) + } + sort.Slice(fileIDs, func(i, j int) bool { return fileIDs[i] < fileIDs[j] }) + fmt.Printf("FileIDs: %v\n", fileIDs) +} + +func TestIterateDeleted(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + txnSet(t, db, []byte("Key1"), []byte("Value1"), 0x00) + txnSet(t, db, []byte("Key2"), []byte("Value2"), 0x00) + + iterOpt := DefaultIteratorOptions + iterOpt.PrefetchValues = false + txn := db.NewTransaction(false) + idxIt := txn.NewIterator(iterOpt) + defer idxIt.Close() + + count := 0 + txn2 := db.NewTransaction(true) + prefix := []byte("Key") + for idxIt.Seek(prefix); idxIt.ValidForPrefix(prefix); idxIt.Next() { + key := idxIt.Item().Key() + count++ + newKey := make([]byte, len(key)) + copy(newKey, key) + require.NoError(t, txn2.Delete(newKey)) + } + require.Equal(t, 2, count) + require.NoError(t, txn2.Commit()) + + for _, prefetch := range [...]bool{true, false} { + t.Run(fmt.Sprintf("Prefetch=%t", prefetch), func(t *testing.T) { + txn := db.NewTransaction(false) + iterOpt = DefaultIteratorOptions + iterOpt.PrefetchValues = prefetch + idxIt = txn.NewIterator(iterOpt) + + var estSize int64 + var idxKeys []string + for idxIt.Seek(prefix); idxIt.Valid(); idxIt.Next() { + item := idxIt.Item() + key := item.Key() + estSize += item.EstimatedSize() + if !bytes.HasPrefix(key, prefix) { + break + } + idxKeys = append(idxKeys, string(key)) + t.Logf("%+v\n", idxIt.Item()) + } + require.Equal(t, 0, len(idxKeys)) + require.Equal(t, int64(0), estSize) + }) + } + }) +} + +func TestIterateParallel(t *testing.T) { + key := func(account int) []byte { + var b [4]byte + binary.BigEndian.PutUint32(b[:], uint32(account)) + return append([]byte("account-"), b[:]...) + } + + N := 100000 + iterate := func(txn *Txn, wg *sync.WaitGroup) { + defer wg.Done() + itr := txn.NewIterator(DefaultIteratorOptions) + defer itr.Close() + + var count int + for itr.Rewind(); itr.Valid(); itr.Next() { + count++ + item := itr.Item() + require.Equal(t, "account-", string(item.Key()[0:8])) + err := item.Value(func(val []byte) error { + require.Equal(t, "1000", string(val)) + return nil + }) + require.NoError(t, err) + } + require.Equal(t, N, count) + itr.Close() // Double close. + } + + opt := DefaultOptions + runBadgerTest(t, &opt, func(t *testing.T, db *DB) { + var wg sync.WaitGroup + var txns []*Txn + for i := 0; i < N; i++ { + wg.Add(1) + txn := db.NewTransaction(true) + require.NoError(t, txn.Set(key(i), []byte("1000"))) + txns = append(txns, txn) + } + for _, txn := range txns { + txn.CommitWith(func(err error) { + y.Check(err) + wg.Done() + }) + } + + wg.Wait() + + // Check that a RW txn can't run multiple iterators. + txn := db.NewTransaction(true) + itr := txn.NewIterator(DefaultIteratorOptions) + require.Panics(t, func() { + txn.NewIterator(DefaultIteratorOptions) + }) + require.Panics(t, txn.Discard) + itr.Close() + txn.Discard() + + // Run multiple iterators for a RO txn. + txn = db.NewTransaction(false) + defer txn.Discard() + wg.Add(3) + go iterate(txn, &wg) + go iterate(txn, &wg) + go iterate(txn, &wg) + wg.Wait() + }) +} + +func TestDeleteWithoutSyncWrite(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := DefaultOptions + opt.Dir = dir + opt.ValueDir = dir + kv, err := Open(opt) + if err != nil { + t.Error(err) + t.Fail() + } + + key := []byte("k1") + // Set a value with size > value threshold so that its written to value log. 
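// Note tying this back to shouldWriteValueToLSM in db.go above: values shorter than
// Options.ValueThreshold are stored inline in the LSM tree, while values at or above
// the threshold live in the value log with only an encoded valuePointer in the LSM.
// Rough illustration (the threshold is whatever the Options carry, not a constant):
//
//	if len(value) < db.opt.ValueThreshold {
//		// stored inline in the memtable / SSTables
//	} else {
//		// stored in a .vlog file; the LSM keeps a value pointer
//	}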
+ txnSet(t, kv, key, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789FOOBARZOGZOG"), 0x00) + txnDelete(t, kv, key) + kv.Close() + + // Reopen KV + kv, err = Open(opt) + if err != nil { + t.Error(err) + t.Fail() + } + defer kv.Close() + + require.NoError(t, kv.View(func(txn *Txn) error { + _, err := txn.Get(key) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) +} + +func TestPidFile(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Reopen database + _, err := Open(getTestOptions(db.opt.Dir)) + require.Error(t, err) + require.Contains(t, err.Error(), "Another process is using this Badger database") + }) +} + +func TestInvalidKey(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + err := db.Update(func(txn *Txn) error { + err := txn.Set([]byte("!badger!head"), nil) + require.Equal(t, ErrInvalidKey, err) + + err = txn.Set([]byte("!badger!"), nil) + require.Equal(t, ErrInvalidKey, err) + + err = txn.Set([]byte("!badger"), []byte("BadgerDB")) + require.NoError(t, err) + return err + }) + require.NoError(t, err) + + require.NoError(t, db.View(func(txn *Txn) error { + item, err := txn.Get([]byte("!badger")) + if err != nil { + return err + } + require.NoError(t, item.Value(func(val []byte) error { + require.Equal(t, []byte("BadgerDB"), val) + return nil + })) + return nil + })) + }) +} + +func TestIteratorPrefetchSize(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + + bkey := func(i int) []byte { + return []byte(fmt.Sprintf("%09d", i)) + } + bval := func(i int) []byte { + return []byte(fmt.Sprintf("%025d", i)) + } + + n := 100 + for i := 0; i < n; i++ { + // if (i % 10) == 0 { + // t.Logf("Put i=%d\n", i) + // } + txnSet(t, db, bkey(i), bval(i), byte(i%127)) + } + + getIteratorCount := func(prefetchSize int) int { + opt := IteratorOptions{} + opt.PrefetchValues = true + opt.PrefetchSize = prefetchSize + + var count int + txn := db.NewTransaction(false) + it := txn.NewIterator(opt) + { + t.Log("Starting first basic iteration") + for it.Rewind(); it.Valid(); it.Next() { + count++ + } + require.EqualValues(t, n, count) + } + return count + } + + var sizes = []int{-10, 0, 1, 10} + for _, size := range sizes { + c := getIteratorCount(size) + require.Equal(t, 100, c) + } + }) +} + +func TestSetIfAbsentAsync(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + kv, _ := Open(getTestOptions(dir)) + + bkey := func(i int) []byte { + return []byte(fmt.Sprintf("%09d", i)) + } + + f := func(err error) {} + + n := 1000 + for i := 0; i < n; i++ { + // if (i % 10) == 0 { + // t.Logf("Put i=%d\n", i) + // } + txn := kv.NewTransaction(true) + _, err = txn.Get(bkey(i)) + require.Equal(t, ErrKeyNotFound, err) + require.NoError(t, txn.SetWithMeta(bkey(i), nil, byte(i%127))) + txn.CommitWith(f) + } + + require.NoError(t, kv.Close()) + kv, err = Open(getTestOptions(dir)) + require.NoError(t, err) + + opt := DefaultIteratorOptions + txn := kv.NewTransaction(false) + var count int + it := txn.NewIterator(opt) + { + t.Log("Starting first basic iteration") + for it.Rewind(); it.Valid(); it.Next() { + count++ + } + require.EqualValues(t, n, count) + } + require.Equal(t, n, count) + require.NoError(t, kv.Close()) +} + +func TestGetSetRace(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + + data := make([]byte, 4096) + _, err := rand.Read(data) + require.NoError(t, err) + + var ( + numOp = 100 + wg sync.WaitGroup + keyCh = make(chan string) + ) + + // writer + wg.Add(1) + go 
func() { + defer func() { + wg.Done() + close(keyCh) + }() + + for i := 0; i < numOp; i++ { + key := fmt.Sprintf("%d", i) + txnSet(t, db, []byte(key), data, 0x00) + keyCh <- key + } + }() + + // reader + wg.Add(1) + go func() { + defer wg.Done() + + for key := range keyCh { + require.NoError(t, db.View(func(txn *Txn) error { + item, err := txn.Get([]byte(key)) + require.NoError(t, err) + err = item.Value(nil) + require.NoError(t, err) + return nil + })) + } + }() + + wg.Wait() + }) +} + +func TestDiscardVersionsBelow(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Write 4 versions of the same key + for i := 0; i < 4; i++ { + err := db.Update(func(txn *Txn) error { + return txn.Set([]byte("answer"), []byte(fmt.Sprintf("%d", i))) + }) + require.NoError(t, err) + } + + opts := DefaultIteratorOptions + opts.AllVersions = true + opts.PrefetchValues = false + + // Verify that there are 4 versions, and record 3rd version (2nd from top in iteration) + db.View(func(txn *Txn) error { + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + count++ + item := it.Item() + require.Equal(t, []byte("answer"), item.Key()) + if item.DiscardEarlierVersions() { + break + } + } + require.Equal(t, 4, count) + return nil + }) + + // Set new version and discard older ones. + err := db.Update(func(txn *Txn) error { + return txn.SetWithDiscard([]byte("answer"), []byte("5"), 0) + }) + require.NoError(t, err) + + // Verify that there are only 2 versions left, and versions + // below ts have been deleted. + db.View(func(txn *Txn) error { + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + count++ + item := it.Item() + require.Equal(t, []byte("answer"), item.Key()) + if item.DiscardEarlierVersions() { + break + } + } + require.Equal(t, 1, count) + return nil + }) + }) +} + +func TestExpiry(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Write two keys, one with a TTL + err := db.Update(func(txn *Txn) error { + return txn.Set([]byte("answer1"), []byte("42")) + }) + require.NoError(t, err) + + err = db.Update(func(txn *Txn) error { + return txn.SetWithTTL([]byte("answer2"), []byte("43"), 1*time.Second) + }) + require.NoError(t, err) + + time.Sleep(2 * time.Second) + + // Verify that only unexpired key is found during iteration + err = db.View(func(txn *Txn) error { + _, err := txn.Get([]byte("answer1")) + require.NoError(t, err) + + _, err = txn.Get([]byte("answer2")) + require.Equal(t, ErrKeyNotFound, err) + return nil + }) + require.NoError(t, err) + + // Verify that only one key is found during iteration + opts := DefaultIteratorOptions + opts.PrefetchValues = false + err = db.View(func(txn *Txn) error { + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + count++ + item := it.Item() + require.Equal(t, []byte("answer1"), item.Key()) + } + require.Equal(t, 1, count) + return nil + }) + require.NoError(t, err) + }) +} + +func randBytes(n int) []byte { + recv := make([]byte, n) + in, err := rand.Read(recv) + if err != nil { + log.Fatal(err) + } + return recv[:in] +} + +var benchmarkData = []struct { + key, value []byte +}{ + {randBytes(100), nil}, + {randBytes(1000), []byte("foo")}, + {[]byte("foo"), randBytes(1000)}, + {[]byte(""), randBytes(1000)}, + {nil, randBytes(1000000)}, + {randBytes(100000), nil}, + {randBytes(1000000), nil}, +} + +func TestLargeKeys(t *testing.T) { + dir, err := 
ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opts := new(Options) + *opts = DefaultOptions + opts.ValueLogFileSize = 1024 * 1024 * 1024 + opts.Dir = dir + opts.ValueDir = dir + + db, err := Open(*opts) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 1000; i++ { + tx := db.NewTransaction(true) + for _, kv := range benchmarkData { + k := make([]byte, len(kv.key)) + copy(k, kv.key) + + v := make([]byte, len(kv.value)) + copy(v, kv.value) + if err := tx.Set(k, v); err != nil { + // Skip over this record. + } + } + if err := tx.Commit(); err != nil { + t.Fatalf("#%d: batchSet err: %v", i, err) + } + } +} + +func TestCreateDirs(t *testing.T) { + dir, err := ioutil.TempDir("", "parent") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opts := DefaultOptions + dir = filepath.Join(dir, "badger") + opts.Dir = dir + opts.ValueDir = dir + db, err := Open(opts) + require.NoError(t, err) + db.Close() + _, err = os.Stat(dir) + require.NoError(t, err) +} + +func TestGetSetDeadlock(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + fmt.Println(dir) + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := DefaultOptions + opt.Dir = dir + opt.ValueDir = dir + opt.ValueLogFileSize = 1 << 20 + db, err := Open(opt) + require.NoError(t, err) + defer db.Close() + + val := make([]byte, 1<<19) + key := []byte("key1") + require.NoError(t, db.Update(func(txn *Txn) error { + rand.Read(val) + require.NoError(t, txn.Set(key, val)) + return nil + })) + + timeout, done := time.After(10*time.Second), make(chan bool) + + go func() { + db.Update(func(txn *Txn) error { + item, err := txn.Get(key) + require.NoError(t, err) + err = item.Value(nil) // This take a RLock on file + require.NoError(t, err) + + rand.Read(val) + require.NoError(t, txn.Set(key, val)) + require.NoError(t, txn.Set([]byte("key2"), val)) + return nil + }) + done <- true + }() + + select { + case <-timeout: + t.Fatal("db.Update did not finish within 10s, assuming deadlock.") + case <-done: + t.Log("db.Update finished.") + } +} + +func TestWriteDeadlock(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + fmt.Println(dir) + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := DefaultOptions + opt.Dir = dir + opt.ValueDir = dir + opt.ValueLogFileSize = 10 << 20 + db, err := Open(opt) + require.NoError(t, err) + + print := func(count *int) { + *count++ + if *count%100 == 0 { + fmt.Printf("%05d\r", *count) + } + } + + var count int + val := make([]byte, 10000) + require.NoError(t, db.Update(func(txn *Txn) error { + for i := 0; i < 1500; i++ { + key := fmt.Sprintf("%d", i) + rand.Read(val) + require.NoError(t, txn.Set([]byte(key), val)) + print(&count) + } + return nil + })) + + count = 0 + fmt.Println("\nWrites done. Iteration and updates starting...") + err = db.Update(func(txn *Txn) error { + opt := DefaultIteratorOptions + opt.PrefetchValues = false + it := txn.NewIterator(opt) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + + // Using Value() would cause deadlock. 
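// Note expanding on the deadlock comment above, based on the remark in
// TestGetSetDeadlock that item.Value takes a read lock on the value log file:
// mixing item.Value with txn.Set inside the same Update can therefore deadlock,
// which is why this loop copies values out instead, e.g.
//
//	out, err := item.ValueCopy(nil) // copies into its own buffer, safe alongside txn.Set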
+ // item.Value() + out, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, len(val), len(out)) + + key := y.Copy(item.Key()) + rand.Read(val) + require.NoError(t, txn.Set(key, val)) + print(&count) + } + return nil + }) + require.NoError(t, err) +} + +func TestSequence(t *testing.T) { + key0 := []byte("seq0") + key1 := []byte("seq1") + + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + seq0, err := db.GetSequence(key0, 10) + require.NoError(t, err) + seq1, err := db.GetSequence(key1, 100) + require.NoError(t, err) + + for i := uint64(0); i < uint64(105); i++ { + num, err := seq0.Next() + require.NoError(t, err) + require.Equal(t, i, num) + + num, err = seq1.Next() + require.NoError(t, err) + require.Equal(t, i, num) + } + err = db.View(func(txn *Txn) error { + item, err := txn.Get(key0) + if err != nil { + return err + } + var num0 uint64 + if err := item.Value(func(val []byte) error { + num0 = binary.BigEndian.Uint64(val) + return nil + }); err != nil { + return err + } + require.Equal(t, uint64(110), num0) + + item, err = txn.Get(key1) + if err != nil { + return err + } + var num1 uint64 + if err := item.Value(func(val []byte) error { + num1 = binary.BigEndian.Uint64(val) + return nil + }); err != nil { + return err + } + require.Equal(t, uint64(200), num1) + return nil + }) + require.NoError(t, err) + }) +} + +func TestSequence_Release(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // get sequence, use once and release + key := []byte("key") + seq, err := db.GetSequence(key, 1000) + require.NoError(t, err) + num, err := seq.Next() + require.NoError(t, err) + require.Equal(t, uint64(0), num) + require.NoError(t, seq.Release()) + + // we used up 0 and 1 should be stored now + err = db.View(func(txn *Txn) error { + item, err := txn.Get(key) + if err != nil { + return err + } + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + require.Equal(t, num+1, binary.BigEndian.Uint64(val)) + return nil + }) + require.NoError(t, err) + + // using it again will lease 1+1000 + num, err = seq.Next() + require.NoError(t, err) + require.Equal(t, uint64(1), num) + err = db.View(func(txn *Txn) error { + item, err := txn.Get(key) + if err != nil { + return err + } + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + require.Equal(t, uint64(1001), binary.BigEndian.Uint64(val)) + return nil + }) + require.NoError(t, err) + }) +} + +func uint64ToBytes(i uint64) []byte { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], i) + return buf[:] +} + +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Merge function to add two uint64 numbers +func add(existing, new []byte) []byte { + return uint64ToBytes( + bytesToUint64(existing) + + bytesToUint64(new)) +} + +func TestMergeOperatorGetBeforeAdd(t *testing.T) { + key := []byte("merge") + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + m := db.GetMergeOperator(key, add, 200*time.Millisecond) + defer m.Stop() + + _, err := m.Get() + require.Equal(t, ErrKeyNotFound, err) + }) +} + +func TestMergeOperatorBeforeAdd(t *testing.T) { + key := []byte("merge") + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + m := db.GetMergeOperator(key, add, 200*time.Millisecond) + defer m.Stop() + time.Sleep(time.Second) + }) +} + +func TestMergeOperatorAddAndGet(t *testing.T) { + key := []byte("merge") + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + m := db.GetMergeOperator(key, add, 200*time.Millisecond) + defer m.Stop() + + err := 
m.Add(uint64ToBytes(1)) + require.NoError(t, err) + m.Add(uint64ToBytes(2)) + require.NoError(t, err) + m.Add(uint64ToBytes(3)) + require.NoError(t, err) + + res, err := m.Get() + require.NoError(t, err) + require.Equal(t, uint64(6), bytesToUint64(res)) + }) +} + +func TestMergeOperatorCompactBeforeGet(t *testing.T) { + key := []byte("merge") + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + m := db.GetMergeOperator(key, add, 200*time.Millisecond) + defer m.Stop() + + err := m.Add(uint64ToBytes(1)) + require.NoError(t, err) + m.Add(uint64ToBytes(2)) + require.NoError(t, err) + m.Add(uint64ToBytes(3)) + require.NoError(t, err) + + time.Sleep(250 * time.Millisecond) // wait for merge to happen + + res, err := m.Get() + require.NoError(t, err) + require.Equal(t, uint64(6), bytesToUint64(res)) + }) +} + +func TestMergeOperatorGetAfterStop(t *testing.T) { + key := []byte("merge") + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + m := db.GetMergeOperator(key, add, 1*time.Second) + + err := m.Add(uint64ToBytes(1)) + require.NoError(t, err) + m.Add(uint64ToBytes(2)) + require.NoError(t, err) + m.Add(uint64ToBytes(3)) + require.NoError(t, err) + + m.Stop() + res, err := m.Get() + require.NoError(t, err) + require.Equal(t, uint64(6), bytesToUint64(res)) + }) +} + +func TestReadOnly(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + + // Create the DB + db, err := Open(opts) + require.NoError(t, err) + for i := 0; i < 10000; i++ { + txnSet(t, db, []byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i)), 0x00) + } + + // Attempt a read-only open while it's open read-write. + opts.ReadOnly = true + _, err = Open(opts) + require.Error(t, err) + if err == ErrWindowsNotSupported { + return + } + require.Contains(t, err.Error(), "Another process is using this Badger database") + db.Close() + + // Open one read-only + opts.ReadOnly = true + kv1, err := Open(opts) + require.NoError(t, err) + defer kv1.Close() + + // Open another read-only + kv2, err := Open(opts) + require.NoError(t, err) + defer kv2.Close() + + // Attempt a read-write open while it's open for read-only + opts.ReadOnly = false + _, err = Open(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "Another process is using this Badger database") + + // Get a thing from the DB + txn1 := kv1.NewTransaction(true) + v1, err := txn1.Get([]byte("key1")) + require.NoError(t, err) + b1, err := v1.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, b1, []byte("value1")) + err = txn1.Commit() + require.NoError(t, err) + + // Get a thing from the DB via the other connection + txn2 := kv2.NewTransaction(true) + v2, err := txn2.Get([]byte("key2000")) + require.NoError(t, err) + b2, err := v2.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, b2, []byte("value2000")) + err = txn2.Commit() + require.NoError(t, err) + + // Attempt to set a value on a read-only connection + txn := kv1.NewTransaction(true) + err = txn.SetWithMeta([]byte("key"), []byte("value"), 0x00) + require.Error(t, err) + require.Contains(t, err.Error(), "No sets or deletes are allowed in a read-only transaction") + err = txn.Commit() + require.NoError(t, err) +} + +func TestLSMOnly(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opts := LSMOnlyOptions + opts.Dir = dir + opts.ValueDir = dir + + dopts := DefaultOptions + require.NotEqual(t, dopts.ValueThreshold, opts.ValueThreshold) + + 
dopts.ValueThreshold = 1 << 16 + _, err = Open(dopts) + require.Equal(t, ErrValueThreshold, err) + + opts.ValueLogMaxEntries = 100 + db, err := Open(opts) + require.NoError(t, err) + if err != nil { + t.Fatal(err) + } + + value := make([]byte, 128) + _, err = rand.Read(value) + for i := 0; i < 500; i++ { + require.NoError(t, err) + txnSet(t, db, []byte(fmt.Sprintf("key%d", i)), value, 0x00) + } + require.NoError(t, db.Close()) // Close to force compactions, so Value log GC would run. + + db, err = Open(opts) + require.NoError(t, err) + if err != nil { + t.Fatal(err) + } + defer db.Close() + require.NoError(t, db.RunValueLogGC(0.2)) +} + +// This test function is doing some intricate sorcery. +func TestMinReadTs(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + for i := 0; i < 10; i++ { + require.NoError(t, db.Update(func(txn *Txn) error { + return txn.Set([]byte("x"), []byte("y")) + })) + } + time.Sleep(time.Millisecond) + + readTxn0 := db.NewTransaction(false) + require.Equal(t, uint64(10), readTxn0.readTs) + + min := db.orc.readMark.DoneUntil() + require.Equal(t, uint64(9), min) + + readTxn := db.NewTransaction(false) + for i := 0; i < 10; i++ { + require.NoError(t, db.Update(func(txn *Txn) error { + return txn.Set([]byte("x"), []byte("y")) + })) + } + require.Equal(t, uint64(20), db.orc.readTs()) + + time.Sleep(time.Millisecond) + require.Equal(t, min, db.orc.readMark.DoneUntil()) + + readTxn0.Discard() + readTxn.Discard() + time.Sleep(time.Millisecond) + require.Equal(t, uint64(19), db.orc.readMark.DoneUntil()) + db.orc.readMark.Done(uint64(20)) // Because we called readTs. + + for i := 0; i < 10; i++ { + db.View(func(txn *Txn) error { + return nil + }) + } + time.Sleep(time.Millisecond) + require.Equal(t, uint64(20), db.orc.readMark.DoneUntil()) + }) +} + +func TestGoroutineLeak(t *testing.T) { + before := runtime.NumGoroutine() + t.Logf("Num go: %d", before) + for i := 0; i < 12; i++ { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + err := db.Update(func(txn *Txn) error { + return txn.Set([]byte("key"), []byte("value")) + }) + require.NoError(t, err) + }) + } + require.Equal(t, before, runtime.NumGoroutine()) +} + +func ExampleOpen() { + dir, err := ioutil.TempDir("", "badger") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(dir) + opts := DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + db, err := Open(opts) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + err = db.View(func(txn *Txn) error { + _, err := txn.Get([]byte("key")) + // We expect ErrKeyNotFound + fmt.Println(err) + return nil + }) + + if err != nil { + log.Fatal(err) + } + + txn := db.NewTransaction(true) // Read-write txn + err = txn.Set([]byte("key"), []byte("value")) + if err != nil { + log.Fatal(err) + } + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + err = db.View(func(txn *Txn) error { + item, err := txn.Get([]byte("key")) + if err != nil { + return err + } + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + fmt.Printf("%s\n", string(val)) + return nil + }) + + if err != nil { + log.Fatal(err) + } + + // Output: + // Key not found + // value +} + +func ExampleTxn_NewIterator() { + dir, err := ioutil.TempDir("", "badger") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(dir) + + opts := DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + + db, err := Open(opts) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + bkey := func(i int) []byte { + return []byte(fmt.Sprintf("%09d", i)) + } + bval := 
func(i int) []byte { + return []byte(fmt.Sprintf("%025d", i)) + } + + txn := db.NewTransaction(true) + + // Fill in 1000 items + n := 1000 + for i := 0; i < n; i++ { + err := txn.Set(bkey(i), bval(i)) + if err != nil { + log.Fatal(err) + } + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + opt := DefaultIteratorOptions + opt.PrefetchSize = 10 + + // Iterate over 1000 items + var count int + err = db.View(func(txn *Txn) error { + it := txn.NewIterator(opt) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + count++ + } + return nil + }) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Counted %d elements", count) + // Output: + // Counted 1000 elements +} + +func TestMain(m *testing.M) { + // call flag.Parse() here if TestMain uses flags + go func() { + if err := http.ListenAndServe("localhost:8080", nil); err != nil { + log.Fatalf("Unable to open http port at 8080") + } + }() + os.Exit(m.Run()) +} diff --git a/vendor/github.com/dgraph-io/badger/dir_unix.go b/vendor/github.com/dgraph-io/badger/dir_unix.go new file mode 100644 index 00000000..4f809b66 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/dir_unix.go @@ -0,0 +1,100 @@ +// +build !windows + +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "gx/ipfs/QmVGjyM9i2msKvLXwh9VosCTgP4mL91kC7hDmqnwTTx6Hu/sys/unix" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part +// of the locking mechanism, it's just advisory. +type directoryLockGuard struct { + // File handle on the directory, which we've flocked. + f *os.File + // The absolute path to our pid file. + path string + // Was this a shared lock for a read-only database? + readOnly bool +} + +// acquireDirectoryLock gets a lock on the directory (using flock). If +// this is not read-only, it will also write our pid to +// dirPath/pidFileName for convenience. +func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) { + // Convert to absolute path so that Release still works even if we do an unbalanced + // chdir in the meantime. + absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) + if err != nil { + return nil, errors.Wrap(err, "cannot get absolute path for pid lock file") + } + f, err := os.Open(dirPath) + if err != nil { + return nil, errors.Wrapf(err, "cannot open directory %q", dirPath) + } + opts := unix.LOCK_EX | unix.LOCK_NB + if readOnly { + opts = unix.LOCK_SH | unix.LOCK_NB + } + + err = unix.Flock(int(f.Fd()), opts) + if err != nil { + f.Close() + return nil, errors.Wrapf(err, + "Cannot acquire directory lock on %q. Another process is using this Badger database.", + dirPath) + } + + if !readOnly { + // Yes, we happily overwrite a pre-existing pid file. 
We're the + // only read-write badger process using this directory. + err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) + if err != nil { + f.Close() + return nil, errors.Wrapf(err, + "Cannot write pid file %q", absPidFilePath) + } + } + return &directoryLockGuard{f, absPidFilePath, readOnly}, nil +} + +// Release deletes the pid file and releases our lock on the directory. +func (guard *directoryLockGuard) release() error { + var err error + if !guard.readOnly { + // It's important that we remove the pid file first. + err = os.Remove(guard.path) + } + + if closeErr := guard.f.Close(); err == nil { + err = closeErr + } + guard.path = "" + guard.f = nil + + return err +} + +// openDir opens a directory for syncing. +func openDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/dgraph-io/badger/dir_windows.go b/vendor/github.com/dgraph-io/badger/dir_windows.go new file mode 100644 index 00000000..248bd27e --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/dir_windows.go @@ -0,0 +1,106 @@ +// +build windows + +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +// OpenDir opens a directory in windows with write access for syncing. +import ( + "os" + "path/filepath" + "syscall" + + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage. +// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are +// closed, which includes the specified handle and any other open or duplicated handles. +// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants +// NOTE: Added here to avoid importing golang.org/x/sys/windows +const ( + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 +) + +func openDir(path string) (*os.File, error) { + fd, err := openDirWin(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDirWin(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} + +// DirectoryLockGuard holds a lock on the directory. +type directoryLockGuard struct { + h syscall.Handle + path string +} + +// AcquireDirectoryLock acquires exclusive access to a directory. 
+func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) { + if readOnly { + return nil, ErrWindowsNotSupported + } + + // Convert to absolute path so that Release still works even if we do an unbalanced + // chdir in the meantime. + absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) + if err != nil { + return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file") + } + + // This call creates a file handler in memory that only one process can use at a time. When + // that process ends, the file is deleted by the system. + // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory. + // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete + // the file when all processes holding the handler are closed. + // XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg. + h, err := syscall.CreateFile( + syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil, + syscall.OPEN_ALWAYS, + uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE), + 0) + if err != nil { + return nil, errors.Wrapf(err, + "Cannot create lock file %q. Another process is using this Badger database", + absLockFilePath) + } + + return &directoryLockGuard{h: h, path: absLockFilePath}, nil +} + +// Release removes the directory lock. +func (g *directoryLockGuard) release() error { + g.path = "" + return syscall.CloseHandle(g.h) +} diff --git a/vendor/github.com/dgraph-io/badger/doc.go b/vendor/github.com/dgraph-io/badger/doc.go new file mode 100644 index 00000000..83dc9a28 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/doc.go @@ -0,0 +1,28 @@ +/* +Package badger implements an embeddable, simple and fast key-value database, +written in pure Go. It is designed to be highly performant for both reads and +writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and +supports transactions. It runs transactions concurrently, with serializable +snapshot isolation guarantees. + +Badger uses an LSM tree along with a value log to separate keys from values, +hence reducing both write amplification and the size of the LSM tree. This +allows LSM tree to be served entirely from RAM, while the values are served +from SSD. + + +Usage + +Badger has the following main types: DB, Txn, Item and Iterator. DB contains +keys that are associated with values. It must be opened with the appropriate +options before it can be accessed. + +All operations happen inside a Txn. Txn represents a transaction, which can +be read-only or read-write. Read-only transactions can read values for a +given key (which are returned inside an Item), or iterate over a set of +key-value pairs using an Iterator (which are returned as Item type values as +well). Read-write transactions can also update and delete keys from the DB. + +See the examples for more usage details. +*/ +package badger diff --git a/vendor/github.com/dgraph-io/badger/errors.go b/vendor/github.com/dgraph-io/badger/errors.go new file mode 100644 index 00000000..00836109 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/errors.go @@ -0,0 +1,105 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +var ( + // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid + // range. + ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB") + + // ErrValueThreshold is returned when ValueThreshold is set to a value close to or greater than + // uint16. + ErrValueThreshold = errors.New("Invalid ValueThreshold, must be lower than uint16") + + // ErrKeyNotFound is returned when key isn't found on a txn.Get. + ErrKeyNotFound = errors.New("Key not found") + + // ErrTxnTooBig is returned if too many writes are fit into a single transaction. + ErrTxnTooBig = errors.New("Txn is too big to fit into one request") + + // ErrConflict is returned when a transaction conflicts with another transaction. This can happen if + // the read rows had been updated concurrently by another transaction. + ErrConflict = errors.New("Transaction Conflict. Please retry") + + // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction. + ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction") + + // ErrDiscardedTxn is returned if a previously discarded transaction is re-used. + ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one") + + // ErrEmptyKey is returned if an empty key is passed on an update function. + ErrEmptyKey = errors.New("Key cannot be empty") + + // ErrInvalidKey is returned if the key has a special !badger! prefix, + // reserved for internal usage. + ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix") + + // ErrRetry is returned when a log file containing the value is not found. + // This usually indicates that it may have been garbage collected, and the + // operation needs to be retried. + ErrRetry = errors.New("Unable to find log file. Please retry") + + // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called. + // In such a case, GC can't be run. + ErrThresholdZero = errors.New( + "Value log GC can't run because threshold is set to zero") + + // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite. + ErrNoRewrite = errors.New( + "Value log GC attempt didn't result in any cleanup") + + // ErrRejected is returned if a value log GC is called either while another GC is running, or + // after DB::Close has been called. + ErrRejected = errors.New("Value log GC request rejected") + + // ErrInvalidRequest is returned if the user request is invalid. + ErrInvalidRequest = errors.New("Invalid request") + + // ErrManagedTxn is returned if the user tries to use an API which isn't + // allowed due to external management of transactions, when using ManagedDB. + ErrManagedTxn = errors.New( + "Invalid API request. Not allowed to perform this action using ManagedDB") + + // ErrInvalidDump if a data dump made previously cannot be loaded into the database. 
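
As an illustrative aside on the error values defined in this file: the usual caller-side pattern is to commit and restart on ErrTxnTooBig, and to re-run the whole read-modify-write closure on ErrConflict. A minimal sketch, assuming an already-opened *badger.DB; the upstream import path is shown rather than the gx-rewritten path used in this vendor tree.

package main

import (
	"log"

	"github.com/dgraph-io/badger"
)

// setAll writes every key with the same value, committing and opening a fresh
// transaction whenever the current one fills up (ErrTxnTooBig).
func setAll(db *badger.DB, keys [][]byte, val []byte) error {
	txn := db.NewTransaction(true)          // read-write
	defer func() { txn.Discard() }()        // discard whichever txn is current; no-op after Commit
	for _, k := range keys {
		err := txn.Set(k, val)
		if err == badger.ErrTxnTooBig {
			if err = txn.Commit(); err != nil {
				return err
			}
			txn = db.NewTransaction(true)
			err = txn.Set(k, val)
		}
		if err != nil {
			return err
		}
	}
	return txn.Commit()
}

// retryOnConflict re-runs an update closure while it keeps losing the
// serializable-snapshot check (ErrConflict).
func retryOnConflict(db *badger.DB, fn func(txn *badger.Txn) error) error {
	for {
		err := db.Update(fn)
		if err != badger.ErrConflict {
			return err
		}
		log.Println("transaction conflict, retrying")
	}
}
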
+ ErrInvalidDump = errors.New("Data dump cannot be read") + + // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence. + ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero") + + // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not + // within the valid range + ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap") + + // ErrReplayNeeded is returned when opt.ReadOnly is set but the + // database requires a value log replay. + ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only") + + // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows + ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows") + + // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of + // corrupt data to allow Badger to run properly. + ErrTruncateNeeded = errors.New("Value log truncate required to run DB. This might result in data loss") + + // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all + // data from Badger, we stop accepting new writes, by returning this error. + ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close") +) diff --git a/vendor/github.com/dgraph-io/badger/images/benchmarks-rocksdb.png b/vendor/github.com/dgraph-io/badger/images/benchmarks-rocksdb.png new file mode 100644 index 00000000..27081e81 Binary files /dev/null and b/vendor/github.com/dgraph-io/badger/images/benchmarks-rocksdb.png differ diff --git a/vendor/github.com/dgraph-io/badger/images/diggy-shadow.png b/vendor/github.com/dgraph-io/badger/images/diggy-shadow.png new file mode 100644 index 00000000..19ba3f2c Binary files /dev/null and b/vendor/github.com/dgraph-io/badger/images/diggy-shadow.png differ diff --git a/vendor/github.com/dgraph-io/badger/integration/testgc/.gitignore b/vendor/github.com/dgraph-io/badger/integration/testgc/.gitignore new file mode 100644 index 00000000..f6600666 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/integration/testgc/.gitignore @@ -0,0 +1 @@ +/testgc diff --git a/vendor/github.com/dgraph-io/badger/integration/testgc/main.go b/vendor/github.com/dgraph-io/badger/integration/testgc/main.go new file mode 100644 index 00000000..a1852bb1 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/integration/testgc/main.go @@ -0,0 +1,218 @@ +package main + +import ( + "encoding/binary" + "fmt" + "log" + "math/rand" + "net/http" + _ "net/http/pprof" + "os" + "sync" + "sync/atomic" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +var maxValue int64 = 10000000 +var suffix = make([]byte, 128) + +type testSuite struct { + sync.Mutex + vals map[uint64]uint64 + + count uint64 // Not under mutex lock. +} + +func encoded(i uint64) []byte { + out := make([]byte, 8) + binary.BigEndian.PutUint64(out, i) + return out +} + +func (s *testSuite) write(db *badger.DB) error { + return db.Update(func(txn *badger.Txn) error { + for i := 0; i < 10; i++ { + // These keys would be overwritten. + keyi := uint64(rand.Int63n(maxValue)) + key := encoded(keyi) + vali := atomic.AddUint64(&s.count, 1) + val := encoded(vali) + val = append(val, suffix...) 
+ if err := txn.Set(key, val); err != nil { + return err + } + } + for i := 0; i < 20; i++ { + // These keys would be new and never overwritten. + keyi := atomic.AddUint64(&s.count, 1) + if keyi%1000000 == 0 { + log.Printf("Count: %d\n", keyi) + } + key := encoded(keyi) + val := append(key, suffix...) + if err := txn.Set(key, val); err != nil { + return err + } + } + return nil + }) +} + +func (s *testSuite) read(db *badger.DB) error { + max := int64(atomic.LoadUint64(&s.count)) + keyi := uint64(rand.Int63n(max)) + key := encoded(keyi) + + err := db.View(func(txn *badger.Txn) error { + item, err := txn.Get(key) + if err != nil { + return err + } + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + y.AssertTruef(len(val) == len(suffix)+8, "Found val of len: %d\n", len(val)) + vali := binary.BigEndian.Uint64(val[0:8]) + s.Lock() + expected := s.vals[keyi] + if vali < expected { + log.Fatalf("Expected: %d. Found: %d. Key: %d\n", expected, vali, keyi) + } else if vali == expected { + // pass + } else { + s.vals[keyi] = vali + } + s.Unlock() + return nil + }) + if err == badger.ErrKeyNotFound { + return nil + } + return err +} + +func main() { + fmt.Println("Badger Integration test for value log GC.") + + dir := "/mnt/drive/badgertest" + os.RemoveAll(dir) + + opts := badger.DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + opts.TableLoadingMode = options.MemoryMap + opts.ValueLogLoadingMode = options.FileIO + opts.SyncWrites = false + + db, err := badger.Open(opts) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + go http.ListenAndServe("localhost:8080", nil) + + closer := y.NewCloser(11) + go func() { + // Run value log GC. + defer closer.Done() + var count int + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for range ticker.C { + again: + select { + case <-closer.HasBeenClosed(): + log.Printf("Num times value log GC was successful: %d\n", count) + return + default: + } + log.Printf("Starting a value log GC") + err := db.RunValueLogGC(0.1) + log.Printf("Result of value log GC: %v\n", err) + if err == nil { + count++ + goto again + } + } + }() + + s := testSuite{ + count: uint64(maxValue), + vals: make(map[uint64]uint64), + } + var numLoops uint64 + ticker := time.NewTicker(5 * time.Second) + for i := 0; i < 10; i++ { + go func() { + defer closer.Done() + for { + if err := s.write(db); err != nil { + log.Fatal(err) + } + for j := 0; j < 10; j++ { + if err := s.read(db); err != nil { + log.Fatal(err) + } + } + nl := atomic.AddUint64(&numLoops, 1) + select { + case <-closer.HasBeenClosed(): + return + case <-ticker.C: + log.Printf("Num loops: %d\n", nl) + default: + } + } + }() + } + time.Sleep(5 * time.Minute) + log.Println("Signaling...") + closer.SignalAndWait() + log.Println("Wait done. Now iterating over everything.") + + err = db.View(func(txn *badger.Txn) error { + iopts := badger.DefaultIteratorOptions + itr := txn.NewIterator(iopts) + defer itr.Close() + + var total, tested int + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + key := item.Key() + keyi := binary.BigEndian.Uint64(key) + total++ + + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + if len(val) < 8 { + log.Printf("Unexpected value: %x\n", val) + continue + } + vali := binary.BigEndian.Uint64(val[0:8]) + + expected, ok := s.vals[keyi] // Not all keys must be in vals map. + if ok { + tested++ + if vali < expected { + // vali must be equal or greater than what's in the map. + log.Fatalf("Expected: %d. Got: %d. 
Key: %d\n", expected, vali, keyi) + } + } + } + log.Printf("Total iterated: %d. Tested values: %d\n", total, tested) + return nil + }) + if err != nil { + log.Fatalf("Error while iterating: %v", err) + } + log.Println("Iteration done. Test successful.") + time.Sleep(time.Minute) // Time to do some poking around. +} diff --git a/vendor/github.com/dgraph-io/badger/iterator.go b/vendor/github.com/dgraph-io/badger/iterator.go new file mode 100644 index 00000000..1584a94f --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/iterator.go @@ -0,0 +1,678 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "fmt" + "hash/crc32" + "sync" + "sync/atomic" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +type prefetchStatus uint8 + +const ( + prefetched prefetchStatus = iota + 1 +) + +// Item is returned during iteration. Both the Key() and Value() output is only valid until +// iterator.Next() is called. +type Item struct { + status prefetchStatus + err error + wg sync.WaitGroup + db *DB + key []byte + vptr []byte + meta byte // We need to store meta to know about bitValuePointer. + userMeta byte + expiresAt uint64 + val []byte + slice *y.Slice // Used only during prefetching. + next *Item + version uint64 + txn *Txn +} + +// String returns a string representation of Item +func (item *Item) String() string { + return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta) +} + +// Key returns the key. +// +// Key is only valid as long as item is valid, or transaction is valid. If you need to use it +// outside its validity, please use KeyCopy. +func (item *Item) Key() []byte { + return item.key +} + +// KeyCopy returns a copy of the key of the item, writing it to dst slice. +// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and +// returned. +func (item *Item) KeyCopy(dst []byte) []byte { + return y.SafeCopy(dst, item.key) +} + +// Version returns the commit timestamp of the item. +func (item *Item) Version() uint64 { + return item.version +} + +// Value retrieves the value of the item from the value log. +// +// This method must be called within a transaction. Calling it outside a +// transaction is considered undefined behavior. If an iterator is being used, +// then Item.Value() is defined in the current iteration only, because items are +// reused. +// +// If you need to use a value outside a transaction, please use Item.ValueCopy +// instead, or copy it yourself. Value might change once discard or commit is called. +// Use ValueCopy if you want to do a Set after Get. 
+func (item *Item) Value(fn func(val []byte) error) error { + item.wg.Wait() + if item.status == prefetched { + if item.err == nil && fn != nil { + if err := fn(item.val); err != nil { + return err + } + } + return item.err + } + buf, cb, err := item.yieldItemValue() + defer runCallback(cb) + if err != nil { + return err + } + if fn != nil { + return fn(buf) + } + return nil +} + +// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice. +// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and +// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call. +// +// This function is useful in long running iterate/update transactions to avoid a write deadlock. +// See Github issue: https://github.com/dgraph-io/badger/issues/315 +func (item *Item) ValueCopy(dst []byte) ([]byte, error) { + item.wg.Wait() + if item.status == prefetched { + return y.SafeCopy(dst, item.val), item.err + } + buf, cb, err := item.yieldItemValue() + defer runCallback(cb) + return y.SafeCopy(dst, buf), err +} + +func (item *Item) hasValue() bool { + if item.meta == 0 && item.vptr == nil { + // key not found + return false + } + return true +} + +// IsDeletedOrExpired returns true if item contains deleted or expired value. +func (item *Item) IsDeletedOrExpired() bool { + return isDeletedOrExpired(item.meta, item.expiresAt) +} + +// DiscardEarlierVersions returns whether the iterator was created with the +// option to discard earlier versions of a key when multiple are available. +func (item *Item) DiscardEarlierVersions() bool { + return item.meta&bitDiscardEarlierVersions > 0 +} + +func (item *Item) yieldItemValue() ([]byte, func(), error) { + key := item.Key() // No need to copy. + for { + if !item.hasValue() { + return nil, nil, nil + } + + if item.slice == nil { + item.slice = new(y.Slice) + } + + if (item.meta & bitValuePointer) == 0 { + val := item.slice.Resize(len(item.vptr)) + copy(val, item.vptr) + return val, nil, nil + } + + var vp valuePointer + vp.Decode(item.vptr) + result, cb, err := item.db.vlog.Read(vp, item.slice) + if err != ErrRetry { + return result, cb, err + } + if bytes.HasPrefix(key, badgerMove) { + // err == ErrRetry + // Error is retry even after checking the move keyspace. So, let's + // just assume that value is not present. + return nil, cb, nil + } + + // The value pointer is pointing to a deleted value log. Look for the + // move key and read that instead. + runCallback(cb) + // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation. + keyTs := y.KeyWithTs(item.Key(), item.Version()) + key = make([]byte, len(badgerMove)+len(keyTs)) + n := copy(key, badgerMove) + copy(key[n:], keyTs) + // Note that we can't set item.key to move key, because that would + // change the key user sees before and after this call. Also, this move + // logic is internal logic and should not impact the external behavior + // of the retrieval. + vs, err := item.db.get(key) + if err != nil { + return nil, nil, err + } + if vs.Version != item.Version() { + return nil, nil, nil + } + // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this + // slice gets overwritten. + item.vptr = y.SafeCopy(item.vptr, vs.Value) + item.meta &^= bitValuePointer // Clear the value pointer bit. + if vs.Meta&bitValuePointer > 0 { + item.meta |= bitValuePointer // This meta would only be about value pointer. 
+ } + } +} + +func runCallback(cb func()) { + if cb != nil { + cb() + } +} + +func (item *Item) prefetchValue() { + val, cb, err := item.yieldItemValue() + defer runCallback(cb) + + item.err = err + item.status = prefetched + if val == nil { + return + } + if item.db.opt.ValueLogLoadingMode == options.MemoryMap { + buf := item.slice.Resize(len(val)) + copy(buf, val) + item.val = buf + } else { + item.val = val + } +} + +// EstimatedSize returns the approximate size of the key-value pair. +// +// This can be called while iterating through a store to quickly estimate the +// size of a range of key-value pairs (without fetching the corresponding +// values). +func (item *Item) EstimatedSize() int64 { + if !item.hasValue() { + return 0 + } + if (item.meta & bitValuePointer) == 0 { + return int64(len(item.key) + len(item.vptr)) + } + var vp valuePointer + vp.Decode(item.vptr) + return int64(vp.Len) // includes key length. +} + +// ValueSize returns the exact size of the value. +// +// This can be called to quickly estimate the size of a value without fetching +// it. +func (item *Item) ValueSize() int64 { + if !item.hasValue() { + return 0 + } + if (item.meta & bitValuePointer) == 0 { + return int64(len(item.vptr)) + } + var vp valuePointer + vp.Decode(item.vptr) + + klen := int64(len(item.key) + 8) // 8 bytes for timestamp. + return int64(vp.Len) - klen - headerBufSize - crc32.Size +} + +// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user +// is used to interpret the value. +func (item *Item) UserMeta() byte { + return item.userMeta +} + +// ExpiresAt returns a Unix time value indicating when the item will be +// considered expired. 0 indicates that the item will never expire. +func (item *Item) ExpiresAt() uint64 { + return item.expiresAt +} + +// TODO: Switch this to use linked list container in Go. +type list struct { + head *Item + tail *Item +} + +func (l *list) push(i *Item) { + i.next = nil + if l.tail == nil { + l.head = i + l.tail = i + return + } + l.tail.next = i + l.tail = i +} + +func (l *list) pop() *Item { + if l.head == nil { + return nil + } + i := l.head + if l.head == l.tail { + l.tail = nil + l.head = nil + } else { + l.head = i.next + } + i.next = nil + return i +} + +// IteratorOptions is used to set options when iterating over Badger key-value +// stores. +// +// This package provides DefaultIteratorOptions which contains options that +// should work for most applications. Consider using that as a starting point +// before customizing it for your own needs. +type IteratorOptions struct { + // Indicates whether we should prefetch values during iteration and store them. + PrefetchValues bool + // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true. + PrefetchSize int + Reverse bool // Direction of iteration. False is forward, true is backward. + AllVersions bool // Fetch all valid versions of the same key. + + // The following option is used to narrow down the SSTables that iterator picks up. If + // Prefix is specified, only tables which could have this prefix are picked based on their range + // of keys. + Prefix []byte // Only iterate over this given prefix. + prefixIsKey bool // If set, use the prefix for bloom filter lookup. + + internalAccess bool // Used to allow internal access to badger keys. 
+} + +func (opt *IteratorOptions) pickTable(t table.TableInterface) bool { + if len(opt.Prefix) == 0 { + return true + } + trim := func(key []byte) []byte { + if len(key) > len(opt.Prefix) { + return key[:len(opt.Prefix)] + } + return key + } + if bytes.Compare(trim(t.Smallest()), opt.Prefix) > 0 { + return false + } + if bytes.Compare(trim(t.Biggest()), opt.Prefix) < 0 { + return false + } + // Bloom filter lookup would only work if opt.Prefix does NOT have the read + // timestamp as part of the key. + if opt.prefixIsKey && t.DoesNotHave(opt.Prefix) { + return false + } + return true +} + +// DefaultIteratorOptions contains default options when iterating over Badger key-value stores. +var DefaultIteratorOptions = IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Reverse: false, + AllVersions: false, +} + +// Iterator helps iterating over the KV pairs in a lexicographically sorted order. +type Iterator struct { + iitr *y.MergeIterator + txn *Txn + readTs uint64 + + opt IteratorOptions + item *Item + data list + waste list + + lastKey []byte // Used to skip over multiple versions of the same key. + + closed bool +} + +// NewIterator returns a new iterator. Depending upon the options, either only keys, or both +// key-value pairs would be fetched. The keys are returned in lexicographically sorted order. +// Using prefetch is recommended if you're doing a long running iteration, for performance. +// +// Multiple Iterators: +// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write +// txn, only one can be running at one time to avoid race conditions, because Txn is thread-unsafe. +func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator { + if txn.discarded { + panic("Transaction has already been discarded") + } + // Do not change the order of the next if. We must track the number of running iterators. + if atomic.AddInt32(&txn.numIterators, 1) > 1 && txn.update { + atomic.AddInt32(&txn.numIterators, -1) + panic("Only one iterator can be active at one time, for a RW txn.") + } + + // TODO: If Prefix is set, only pick those memtables which have keys with + // the prefix. + tables, decr := txn.db.getMemTables() + defer decr() + txn.db.vlog.incrIteratorCount() + var iters []y.Iterator + if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil { + iters = append(iters, itr) + } + for i := 0; i < len(tables); i++ { + iters = append(iters, tables[i].NewUniIterator(opt.Reverse)) + } + iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references. + res := &Iterator{ + txn: txn, + iitr: y.NewMergeIterator(iters, opt.Reverse), + opt: opt, + readTs: txn.readTs, + } + return res +} + +// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a +// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to +// additionally run bloom filter lookups before picking tables from the LSM tree. +func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator { + if len(opt.Prefix) > 0 { + panic("opt.Prefix should be nil for NewKeyIterator.") + } + opt.Prefix = key // This key must be without the timestamp. + opt.prefixIsKey = true + return txn.NewIterator(opt) +} + +func (it *Iterator) newItem() *Item { + item := it.waste.pop() + if item == nil { + item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn} + } + return item +} + +// Item returns pointer to the current key-value pair. +// This item is only valid until it.Next() gets called. 
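
As an illustrative aside on the Prefix option and NewKeyIterator described above, a small sketch of a key-only prefix scan; it assumes an already-opened *badger.DB and the upstream import path rather than the gx path used in this vendor tree.

package main

import "github.com/dgraph-io/badger"

// countPrefix counts the keys under prefix. Setting opt.Prefix lets pickTable
// skip SSTables whose key range cannot contain the prefix, and disabling
// PrefetchValues keeps this a key-only scan.
func countPrefix(db *badger.DB, prefix []byte) (int, error) {
	var n int
	err := db.View(func(txn *badger.Txn) error {
		opt := badger.DefaultIteratorOptions
		opt.PrefetchValues = false
		opt.Prefix = prefix
		it := txn.NewIterator(opt)
		defer it.Close()
		// Rewind seeks to the prefix, and Valid reports false once keys stop
		// matching it, so no manual HasPrefix check is needed here.
		for it.Rewind(); it.Valid(); it.Next() {
			n++
		}
		return nil
	})
	return n, err
}
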
+func (it *Iterator) Item() *Item { + tx := it.txn + tx.addReadKey(it.item.Key()) + return it.item +} + +// Valid returns false when iteration is done. +func (it *Iterator) Valid() bool { + if it.item == nil { + return false + } + return bytes.HasPrefix(it.item.key, it.opt.Prefix) +} + +// ValidForPrefix returns false when iteration is done +// or when the current key is not prefixed by the specified prefix. +func (it *Iterator) ValidForPrefix(prefix []byte) bool { + return it.Valid() && bytes.HasPrefix(it.item.key, prefix) +} + +// Close would close the iterator. It is important to call this when you're done with iteration. +func (it *Iterator) Close() { + if it.closed { + return + } + it.closed = true + + it.iitr.Close() + // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie + // goroutines behind, which are waiting to acquire file read locks after DB has been closed. + waitFor := func(l list) { + item := l.pop() + for item != nil { + item.wg.Wait() + item = l.pop() + } + } + waitFor(it.waste) + waitFor(it.data) + + // TODO: We could handle this error. + _ = it.txn.db.vlog.decrIteratorCount() + atomic.AddInt32(&it.txn.numIterators, -1) +} + +// Next would advance the iterator by one. Always check it.Valid() after a Next() +// to ensure you have access to a valid it.Item(). +func (it *Iterator) Next() { + // Reuse current item + it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting. + it.waste.push(it.item) + + // Set next item to current + it.item = it.data.pop() + + for it.iitr.Valid() { + if it.parseItem() { + // parseItem calls one extra next. + // This is used to deal with the complexity of reverse iteration. + break + } + } +} + +func isDeletedOrExpired(meta byte, expiresAt uint64) bool { + if meta&bitDelete > 0 { + return true + } + if expiresAt == 0 { + return false + } + return expiresAt <= uint64(time.Now().Unix()) +} + +// parseItem is a complex function because it needs to handle both forward and reverse iteration +// implementation. We store keys such that their versions are sorted in descending order. This makes +// forward iteration efficient, but revese iteration complicated. This tradeoff is better because +// forward iteration is more common than reverse. +// +// This function advances the iterator. +func (it *Iterator) parseItem() bool { + mi := it.iitr + key := mi.Key() + + setItem := func(item *Item) { + if it.item == nil { + it.item = item + } else { + it.data.push(item) + } + } + + // Skip badger keys. + if !it.opt.internalAccess && bytes.HasPrefix(key, badgerPrefix) { + mi.Next() + return false + } + + // Skip any versions which are beyond the readTs. + version := y.ParseTs(key) + if version > it.readTs { + mi.Next() + return false + } + + if it.opt.AllVersions { + // Return deleted or expired values also, otherwise user can't figure out + // whether the key was deleted. + item := it.newItem() + it.fill(item) + setItem(item) + mi.Next() + return true + } + + // If iterating in forward direction, then just checking the last key against current key would + // be sufficient. + if !it.opt.Reverse { + if y.SameKey(it.lastKey, key) { + mi.Next() + return false + } + // Only track in forward direction. + // We should update lastKey as soon as we find a different key in our snapshot. + // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a. + // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5, + // which is wrong. Therefore, update lastKey here. 
+ it.lastKey = y.SafeCopy(it.lastKey, mi.Key()) + } + +FILL: + // If deleted, advance and return. + vs := mi.Value() + if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { + mi.Next() + return false + } + + item := it.newItem() + it.fill(item) + // fill item based on current cursor position. All Next calls have returned, so reaching here + // means no Next was called. + + mi.Next() // Advance but no fill item yet. + if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid. + setItem(item) + return true + } + + // Reverse direction. + nextTs := y.ParseTs(mi.Key()) + mik := y.ParseKey(mi.Key()) + if nextTs <= it.readTs && bytes.Equal(mik, item.key) { + // This is a valid potential candidate. + goto FILL + } + // Ignore the next candidate. Return the current one. + setItem(item) + return true +} + +func (it *Iterator) fill(item *Item) { + vs := it.iitr.Value() + item.meta = vs.Meta + item.userMeta = vs.UserMeta + item.expiresAt = vs.ExpiresAt + + item.version = y.ParseTs(it.iitr.Key()) + item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key())) + + item.vptr = y.SafeCopy(item.vptr, vs.Value) + item.val = nil + if it.opt.PrefetchValues { + item.wg.Add(1) + go func() { + // FIXME we are not handling errors here. + item.prefetchValue() + item.wg.Done() + }() + } +} + +func (it *Iterator) prefetch() { + prefetchSize := 2 + if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 { + prefetchSize = it.opt.PrefetchSize + } + + i := it.iitr + var count int + it.item = nil + for i.Valid() { + if !it.parseItem() { + continue + } + count++ + if count == prefetchSize { + break + } + } +} + +// Seek would seek to the provided key if present. If absent, it would seek to the next smallest key +// greater than the provided key if iterating in the forward direction. Behavior would be reversed if +// iterating backwards. +func (it *Iterator) Seek(key []byte) { + for i := it.data.pop(); i != nil; i = it.data.pop() { + i.wg.Wait() + it.waste.push(i) + } + + it.lastKey = it.lastKey[:0] + if len(key) == 0 { + key = it.opt.Prefix + } + if len(key) == 0 { + it.iitr.Rewind() + it.prefetch() + return + } + + if !it.opt.Reverse { + key = y.KeyWithTs(key, it.txn.readTs) + } else { + key = y.KeyWithTs(key, 0) + } + it.iitr.Seek(key) + it.prefetch() +} + +// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the +// smallest key if iterating forward, and largest if iterating backward. It does not keep track of +// whether the cursor started with a Seek(). +func (it *Iterator) Rewind() { + it.Seek(nil) +} diff --git a/vendor/github.com/dgraph-io/badger/iterator_test.go b/vendor/github.com/dgraph-io/badger/iterator_test.go new file mode 100644 index 00000000..9404b075 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/iterator_test.go @@ -0,0 +1,244 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package badger + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +type tableMock struct { + left, right []byte +} + +func (tm *tableMock) Smallest() []byte { return tm.left } +func (tm *tableMock) Biggest() []byte { return tm.right } +func (tm *tableMock) DoesNotHave(key []byte) bool { return false } + +func TestPickTables(t *testing.T) { + opt := DefaultIteratorOptions + + within := func(prefix, left, right string) { + opt.Prefix = []byte(prefix) + tm := &tableMock{left: []byte(left), right: []byte(right)} + require.True(t, opt.pickTable(tm)) + } + outside := func(prefix, left, right string) { + opt.Prefix = []byte(prefix) + tm := &tableMock{left: []byte(left), right: []byte(right)} + require.False(t, opt.pickTable(tm)) + } + within("abc", "ab", "ad") + within("abc", "abc", "ad") + within("abc", "abb123", "ad") + within("abc", "abc123", "abd234") + within("abc", "abc123", "abc456") + + outside("abd", "abe", "ad") + outside("abd", "ac", "ad") + outside("abd", "b", "e") + outside("abd", "a", "ab") + outside("abd", "ab", "abc") + outside("abd", "ab", "abc123") +} + +func TestIteratePrefix(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + bkey := func(i int) []byte { + return []byte(fmt.Sprintf("%04d", i)) + } + val := []byte("OK") + n := 10000 + + batch := db.NewWriteBatch() + for i := 0; i < n; i++ { + if (i % 1000) == 0 { + t.Logf("Put i=%d\n", i) + } + require.NoError(t, batch.Set(bkey(i), val, 0)) + } + require.NoError(t, batch.Flush()) + + countKeys := func(prefix string) int { + t.Logf("Testing with prefix: %s", prefix) + var count int + opt := DefaultIteratorOptions + opt.Prefix = []byte(prefix) + err := db.View(func(txn *Txn) error { + itr := txn.NewIterator(opt) + defer itr.Close() + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + err := item.Value(func(v []byte) error { + require.Equal(t, val, v) + return nil + }) + require.NoError(t, err) + require.True(t, bytes.HasPrefix(item.Key(), opt.Prefix)) + count++ + } + return nil + }) + require.NoError(t, err) + return count + } + + countOneKey := func(key []byte) int { + var count int + err := db.View(func(txn *Txn) error { + itr := txn.NewKeyIterator(key, DefaultIteratorOptions) + defer itr.Close() + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + err := item.Value(func(v []byte) error { + require.Equal(t, val, v) + return nil + }) + require.NoError(t, err) + require.Equal(t, key, item.Key()) + count++ + } + return nil + }) + require.NoError(t, err) + return count + } + + for i := 0; i <= 9; i++ { + require.Equal(t, 1, countKeys(fmt.Sprintf("%d%d%d%d", i, i, i, i))) + require.Equal(t, 10, countKeys(fmt.Sprintf("%d%d%d", i, i, i))) + require.Equal(t, 100, countKeys(fmt.Sprintf("%d%d", i, i))) + require.Equal(t, 1000, countKeys(fmt.Sprintf("%d", i))) + } + require.Equal(t, 10000, countKeys("")) + + t.Logf("Testing each key with key iterator") + for i := 0; i < n; i++ { + require.Equal(t, 1, countOneKey(bkey(i))) + } + }) +} + +// go test -v -run=XXX -bench=BenchmarkIterate -benchtime=3s +// Benchmark with opt.Prefix set === +// goos: linux +// goarch: amd64 +// pkg: github.com/dgraph-io/badger +// BenchmarkIteratePrefixSingleKey/Key_lookups-4 10000 365539 ns/op +// --- BENCH: BenchmarkIteratePrefixSingleKey/Key_lookups-4 +// 
iterator_test.go:147: Inner b.N: 1 +// iterator_test.go:147: Inner b.N: 100 +// iterator_test.go:147: Inner b.N: 10000 +// --- BENCH: BenchmarkIteratePrefixSingleKey +// iterator_test.go:143: LSM files: 79 +// iterator_test.go:145: Outer b.N: 1 +// PASS +// ok github.com/dgraph-io/badger 41.586s +// +// Benchmark with NO opt.Prefix set === +// goos: linux +// goarch: amd64 +// pkg: github.com/dgraph-io/badger +// BenchmarkIteratePrefixSingleKey/Key_lookups-4 10000 460924 ns/op +// --- BENCH: BenchmarkIteratePrefixSingleKey/Key_lookups-4 +// iterator_test.go:147: Inner b.N: 1 +// iterator_test.go:147: Inner b.N: 100 +// iterator_test.go:147: Inner b.N: 10000 +// --- BENCH: BenchmarkIteratePrefixSingleKey +// iterator_test.go:143: LSM files: 83 +// iterator_test.go:145: Outer b.N: 1 +// PASS +// ok github.com/dgraph-io/badger 41.836s +// +// Only my laptop there's a 20% improvement in latency with ~80 files. +func BenchmarkIteratePrefixSingleKey(b *testing.B) { + dir, err := ioutil.TempDir(".", "badger-test") + y.Check(err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.TableLoadingMode = options.LoadToRAM + db, err := Open(opts) + y.Check(err) + defer db.Close() + + N := 100000 // Should generate around 80 SSTables. + val := []byte("OK") + bkey := func(i int) []byte { + return []byte(fmt.Sprintf("%06d", i)) + } + + batch := db.NewWriteBatch() + for i := 0; i < N; i++ { + y.Check(batch.Set(bkey(i), val, 0)) + } + y.Check(batch.Flush()) + var lsmFiles int + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if strings.HasSuffix(path, ".sst") { + lsmFiles++ + } + if err != nil { + return err + } + return nil + }) + y.Check(err) + b.Logf("LSM files: %d", lsmFiles) + b.Logf("Key splits: %v", db.KeySplits(nil)) + b.Logf("Key splits with prefix: %v", db.KeySplits([]byte("09"))) + + b.Logf("Outer b.N: %d", b.N) + b.Run("Key lookups", func(b *testing.B) { + b.Logf("Inner b.N: %d", b.N) + for i := 0; i < b.N; i++ { + key := bkey(rand.Intn(N)) + err := db.View(func(txn *Txn) error { + opt := DefaultIteratorOptions + // NOTE: Comment opt.Prefix out here to compare the performance + // difference between providing Prefix as an option, v/s not. I + // see a 20% improvement when there are ~80 SSTables. + opt.Prefix = key + opt.AllVersions = true + + itr := txn.NewIterator(opt) + defer itr.Close() + + var count int + for itr.Seek(key); itr.ValidForPrefix(key); itr.Next() { + count++ + } + if count != 1 { + b.Fatalf("Count must be one key: %s. Found: %d", key, count) + } + return nil + }) + if err != nil { + b.Fatalf("Error while View: %v", err) + } + } + }) +} diff --git a/vendor/github.com/dgraph-io/badger/level_handler.go b/vendor/github.com/dgraph-io/badger/level_handler.go new file mode 100644 index 00000000..a33124d8 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/level_handler.go @@ -0,0 +1,304 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package badger + +import ( + "fmt" + "sort" + "sync" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +type levelHandler struct { + // Guards tables, totalSize. + sync.RWMutex + + // For level >= 1, tables are sorted by key ranges, which do not overlap. + // For level 0, tables are sorted by time. + // For level 0, newest table are at the back. Compact the oldest one first, which is at the front. + tables []*table.Table + totalSize int64 + + // The following are initialized once and const. + level int + strLevel string + maxTotalSize int64 + db *DB +} + +func (s *levelHandler) getTotalSize() int64 { + s.RLock() + defer s.RUnlock() + return s.totalSize +} + +// initTables replaces s.tables with given tables. This is done during loading. +func (s *levelHandler) initTables(tables []*table.Table) { + s.Lock() + defer s.Unlock() + + s.tables = tables + s.totalSize = 0 + for _, t := range tables { + s.totalSize += t.Size() + } + + if s.level == 0 { + // Key range will overlap. Just sort by fileID in ascending order + // because newer tables are at the end of level 0. + sort.Slice(s.tables, func(i, j int) bool { + return s.tables[i].ID() < s.tables[j].ID() + }) + } else { + // Sort tables by keys. + sort.Slice(s.tables, func(i, j int) bool { + return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 + }) + } +} + +// deleteTables remove tables idx0, ..., idx1-1. +func (s *levelHandler) deleteTables(toDel []*table.Table) error { + s.Lock() // s.Unlock() below + + toDelMap := make(map[uint64]struct{}) + for _, t := range toDel { + toDelMap[t.ID()] = struct{}{} + } + + // Make a copy as iterators might be keeping a slice of tables. + var newTables []*table.Table + for _, t := range s.tables { + _, found := toDelMap[t.ID()] + if !found { + newTables = append(newTables, t) + continue + } + s.totalSize -= t.Size() + } + s.tables = newTables + + s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow. + + return decrRefs(toDel) +} + +// replaceTables will replace tables[left:right] with newTables. Note this EXCLUDES tables[right]. +// You must call decr() to delete the old tables _after_ writing the update to the manifest. +func (s *levelHandler) replaceTables(newTables []*table.Table) error { + // Need to re-search the range of tables in this level to be replaced as other goroutines might + // be changing it as well. (They can't touch our tables, but if they add/remove other tables, + // the indices get shifted around.) + if len(newTables) == 0 { + return nil + } + + s.Lock() // We s.Unlock() below. + + // Increase totalSize first. + for _, tbl := range newTables { + s.totalSize += tbl.Size() + tbl.IncrRef() + } + + kr := keyRange{ + left: newTables[0].Smallest(), + right: newTables[len(newTables)-1].Biggest(), + } + left, right := s.overlappingTables(levelHandlerRLocked{}, kr) + + toDecr := make([]*table.Table, right-left) + // Update totalSize and reference counts. + for i := left; i < right; i++ { + tbl := s.tables[i] + s.totalSize -= tbl.Size() + toDecr[i-left] = tbl + } + + // To be safe, just make a copy. TODO: Be more careful and avoid copying. 
+ numDeleted := right - left + numAdded := len(newTables) + tables := make([]*table.Table, len(s.tables)-numDeleted+numAdded) + y.AssertTrue(left == copy(tables, s.tables[:left])) + t := tables[left:] + y.AssertTrue(numAdded == copy(t, newTables)) + t = t[numAdded:] + y.AssertTrue(len(s.tables[right:]) == copy(t, s.tables[right:])) + s.tables = tables + s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow. + return decrRefs(toDecr) +} + +func decrRefs(tables []*table.Table) error { + for _, table := range tables { + if err := table.DecrRef(); err != nil { + return err + } + } + return nil +} + +func newLevelHandler(db *DB, level int) *levelHandler { + return &levelHandler{ + level: level, + strLevel: fmt.Sprintf("l%d", level), + db: db, + } +} + +// tryAddLevel0Table returns true if ok and no stalling. +func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool { + y.AssertTrue(s.level == 0) + // Need lock as we may be deleting the first table during a level 0 compaction. + s.Lock() + defer s.Unlock() + if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall { + return false + } + + s.tables = append(s.tables, t) + t.IncrRef() + s.totalSize += t.Size() + + return true +} + +func (s *levelHandler) numTables() int { + s.RLock() + defer s.RUnlock() + return len(s.tables) +} + +func (s *levelHandler) close() error { + s.RLock() + defer s.RUnlock() + var err error + for _, t := range s.tables { + if closeErr := t.Close(); closeErr != nil && err == nil { + err = closeErr + } + } + return errors.Wrap(err, "levelHandler.close") +} + +// getTableForKey acquires a read-lock to access s.tables. It returns a list of tableHandlers. +func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) { + s.RLock() + defer s.RUnlock() + + if s.level == 0 { + // For level 0, we need to check every table. Remember to make a copy as s.tables may change + // once we exit this function, and we don't want to lock s.tables while seeking in tables. + // CAUTION: Reverse the tables. + out := make([]*table.Table, 0, len(s.tables)) + for i := len(s.tables) - 1; i >= 0; i-- { + out = append(out, s.tables[i]) + s.tables[i].IncrRef() + } + return out, func() error { + for _, t := range out { + if err := t.DecrRef(); err != nil { + return err + } + } + return nil + } + } + // For level >= 1, we can do a binary search as key range does not overlap. + idx := sort.Search(len(s.tables), func(i int) bool { + return y.CompareKeys(s.tables[i].Biggest(), key) >= 0 + }) + if idx >= len(s.tables) { + // Given key is strictly > than every element we have. + return nil, func() error { return nil } + } + tbl := s.tables[idx] + tbl.IncrRef() + return []*table.Table{tbl}, tbl.DecrRef +} + +// get returns value for a given key or the key after that. If not found, return nil. +func (s *levelHandler) get(key []byte) (y.ValueStruct, error) { + tables, decr := s.getTableForKey(key) + keyNoTs := y.ParseKey(key) + + var maxVs y.ValueStruct + for _, th := range tables { + if th.DoesNotHave(keyNoTs) { + y.NumLSMBloomHits.Add(s.strLevel, 1) + continue + } + + it := th.NewIterator(false) + defer it.Close() + + y.NumLSMGets.Add(s.strLevel, 1) + it.Seek(key) + if !it.Valid() { + continue + } + if y.SameKey(key, it.Key()) { + if version := y.ParseTs(it.Key()); maxVs.Version < version { + maxVs = it.Value() + maxVs.Version = version + } + } + } + return maxVs, decr() +} + +// appendIterators appends iterators to an array of iterators, for merging. +// Note: This obtains references for the table handlers. 
Remember to close these iterators. +func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator { + s.RLock() + defer s.RUnlock() + + tables := make([]*table.Table, 0, len(s.tables)) + for _, t := range s.tables { + if opt.pickTable(t) { + tables = append(tables, t) + } + } + if len(tables) == 0 { + return iters + } + + if s.level == 0 { + // Remember to add in reverse order! + // The newer table at the end of s.tables should be added first as it takes precedence. + return appendIteratorsReversed(iters, tables, opt.Reverse) + } + return append(iters, table.NewConcatIterator(tables, opt.Reverse)) +} + +type levelHandlerRLocked struct{} + +// overlappingTables returns the tables that intersect with key range. Returns a half-interval. +// This function should already have acquired a read lock, and this is so important the caller must +// pass an empty parameter declaring such. +func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) { + left := sort.Search(len(s.tables), func(i int) bool { + return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0 + }) + right := sort.Search(len(s.tables), func(i int) bool { + return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0 + }) + return left, right +} diff --git a/vendor/github.com/dgraph-io/badger/levels.go b/vendor/github.com/dgraph-io/badger/levels.go new file mode 100644 index 00000000..8b8ad3c7 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/levels.go @@ -0,0 +1,886 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "math" + "math/rand" + "os" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +type levelsController struct { + nextFileID uint64 // Atomic + elog trace.EventLog + + // The following are initialized once and const. + levels []*levelHandler + kv *DB + + cstatus compactStatus +} + +var ( + // This is for getting timings between stalls. + lastUnstalled time.Time +) + +// revertToManifest checks that all necessary table files exist and removes all table files not +// referenced by the manifest. idMap is a set of table file id's that were read from the directory +// listing. +func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error { + // 1. Check all files in manifest exist. + for id := range mf.Tables { + if _, ok := idMap[id]; !ok { + return fmt.Errorf("file does not exist for table %d", id) + } + } + + // 2. Delete files that shouldn't exist. 
+ for id := range idMap { + if _, ok := mf.Tables[id]; !ok { + kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id) + filename := table.NewFilename(id, kv.opt.Dir) + if err := os.Remove(filename); err != nil { + return y.Wrapf(err, "While removing table %d", id) + } + } + } + + return nil +} + +func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { + y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables) + s := &levelsController{ + kv: db, + elog: db.elog, + levels: make([]*levelHandler, db.opt.MaxLevels), + } + s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels) + + for i := 0; i < db.opt.MaxLevels; i++ { + s.levels[i] = newLevelHandler(db, i) + if i == 0 { + // Do nothing. + } else if i == 1 { + // Level 1 probably shouldn't be too much bigger than level 0. + s.levels[i].maxTotalSize = db.opt.LevelOneSize + } else { + s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier) + } + s.cstatus.levels[i] = new(levelCompactStatus) + } + + // Compare manifest against directory, check for existent/non-existent files, and remove. + if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil { + return nil, err + } + + // Some files may be deleted. Let's reload. + var flags uint32 = y.Sync + if db.opt.ReadOnly { + flags |= y.ReadOnly + } + + var mu sync.Mutex + tables := make([][]*table.Table, db.opt.MaxLevels) + var maxFileID uint64 + + // We found that using 3 goroutines allows disk throughput to be utilized to its max. + // Disk utilization is the main thing we should focus on, while trying to read the data. That's + // the one factor that remains constant between HDD and SSD. + throttle := y.NewThrottle(3) + + start := time.Now() + var numOpened int32 + tick := time.NewTicker(3 * time.Second) + defer tick.Stop() + + for fileID, tf := range mf.Tables { + fname := table.NewFilename(fileID, db.opt.Dir) + select { + case <-tick.C: + db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened), + len(mf.Tables), time.Since(start).Round(time.Millisecond)) + default: + } + if err := throttle.Do(); err != nil { + closeAllTables(tables) + return nil, err + } + if fileID > maxFileID { + maxFileID = fileID + } + go func(fname string, tf TableManifest) { + var rerr error + defer func() { + throttle.Done(rerr) + atomic.AddInt32(&numOpened, 1) + }() + fd, err := y.OpenExistingFile(fname, flags) + if err != nil { + rerr = errors.Wrapf(err, "Opening file: %q", fname) + return + } + + t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum) + if err != nil { + if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") { + db.opt.Errorf(err.Error()) + db.opt.Errorf("Ignoring table %s", fd.Name()) + // Do not set rerr. We will continue without this table. + } else { + rerr = errors.Wrapf(err, "Opening table: %q", fname) + } + return + } + + mu.Lock() + tables[tf.Level] = append(tables[tf.Level], t) + mu.Unlock() + }(fname, tf) + } + if err := throttle.Finish(); err != nil { + closeAllTables(tables) + return nil, err + } + db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened), + time.Since(start).Round(time.Millisecond)) + s.nextFileID = maxFileID + 1 + for i, tbls := range tables { + s.levels[i].initTables(tbls) + } + + // Make sure key ranges do not overlap etc. 
+ if err := s.validate(); err != nil { + _ = s.cleanupLevels() + return nil, errors.Wrap(err, "Level validation") + } + + // Sync directory (because we have at least removed some files, or previously created the + // manifest file). + if err := syncDir(db.opt.Dir); err != nil { + _ = s.close() + return nil, err + } + + return s, nil +} + +// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef() +// because that would delete the underlying files.) We ignore errors, which is OK because tables +// are read-only. +func closeAllTables(tables [][]*table.Table) { + for _, tableSlice := range tables { + for _, table := range tableSlice { + _ = table.Close() + } + } +} + +func (s *levelsController) cleanupLevels() error { + var firstErr error + for _, l := range s.levels { + if err := l.close(); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +// This function picks all tables from all levels, creates a manifest changeset, +// applies it, and then decrements the refs of these tables, which would result +// in their deletion. +func (s *levelsController) deleteLSMTree() (int, error) { + // First pick all tables, so we can create a manifest changelog. + var all []*table.Table + for _, l := range s.levels { + l.RLock() + all = append(all, l.tables...) + l.RUnlock() + } + if len(all) == 0 { + return 0, nil + } + + // Generate the manifest changes. + changes := []*pb.ManifestChange{} + for _, table := range all { + changes = append(changes, newDeleteChange(table.ID())) + } + changeSet := pb.ManifestChangeSet{Changes: changes} + if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil { + return 0, err + } + + // Now that manifest has been successfully written, we can delete the tables. + for _, l := range s.levels { + l.Lock() + l.totalSize = 0 + l.tables = l.tables[:0] + l.Unlock() + } + for _, table := range all { + if err := table.DecrRef(); err != nil { + return 0, err + } + } + return len(all), nil +} + +func (s *levelsController) startCompact(lc *y.Closer) { + n := s.kv.opt.NumCompactors + lc.AddRunning(n - 1) + for i := 0; i < n; i++ { + go s.runWorker(lc) + } +} + +func (s *levelsController) runWorker(lc *y.Closer) { + defer lc.Done() + + randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond) + select { + case <-randomDelay.C: + case <-lc.HasBeenClosed(): + randomDelay.Stop() + return + } + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + select { + // Can add a done channel or other stuff. + case <-ticker.C: + prios := s.pickCompactLevels() + for _, p := range prios { + if err := s.doCompact(p); err == nil { + break + } else if err == errFillTables { + // pass + } else { + s.kv.opt.Warningf("While running doCompact: %v\n", err) + } + } + case <-lc.HasBeenClosed(): + return + } + } +} + +// Returns true if level zero may be compacted, without accounting for compactions that already +// might be happening. +func (s *levelsController) isLevel0Compactable() bool { + return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables +} + +// Returns true if the non-zero level may be compacted. delSize provides the size of the tables +// which are currently being compacted so that we treat them as already having started being +// compacted (because they have been, yet their size is already counted in getTotalSize). 
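// ----------------------------------------------------------------------------
// Editorial note (illustration only, not part of the vendored badger source):
// a worked example of how the level capacities set up in newLevelsController
// feed the compaction checks just below (isCompactable, pickCompactLevels).
// Assuming options along the lines of badger's defaults -- NumLevelZeroTables
// = 5, LevelOneSize = 256 MB, LevelSizeMultiplier = 10; treat these numbers as
// illustrative assumptions, not values taken from this patch:
//
//   level 0: compactable once it holds >= 5 tables, regardless of size
//   level 1: maxTotalSize = 256 MB
//   level 2: maxTotalSize = 256 MB * 10 = 2.5 GB
//   level 3: maxTotalSize = 2.5 GB * 10 = 25 GB, and so on per level
//
// pickCompactLevels then turns these checks into priorities: level 0 scores
// numTables / NumLevelZeroTables, level i >= 1 scores
// (totalSize - delSize) / maxTotalSize, and the highest score is compacted
// first.
// ----------------------------------------------------------------------------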
+func (l *levelHandler) isCompactable(delSize int64) bool { + return l.getTotalSize()-delSize >= l.maxTotalSize +} + +type compactionPriority struct { + level int + score float64 +} + +// pickCompactLevel determines which level to compact. +// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction +func (s *levelsController) pickCompactLevels() (prios []compactionPriority) { + // This function must use identical criteria for guaranteeing compaction's progress that + // addLevel0Table uses. + + // cstatus is checked to see if level 0's tables are already being compacted + if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() { + pri := compactionPriority{ + level: 0, + score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables), + } + prios = append(prios, pri) + } + + for i, l := range s.levels[1:] { + // Don't consider those tables that are already being compacted right now. + delSize := s.cstatus.delSize(i + 1) + + if l.isCompactable(delSize) { + pri := compactionPriority{ + level: i + 1, + score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize), + } + prios = append(prios, pri) + } + } + sort.Slice(prios, func(i, j int) bool { + return prios[i].score > prios[j].score + }) + return prios +} + +// compactBuildTables merge topTables and botTables to form a list of new tables. +func (s *levelsController) compactBuildTables( + l int, cd compactDef) ([]*table.Table, func() error, error) { + topTables := cd.top + botTables := cd.bot + + var hasOverlap bool + { + kr := getKeyRange(cd.top) + for i, lh := range s.levels { + if i <= l { // Skip upper levels. + continue + } + lh.RLock() + left, right := lh.overlappingTables(levelHandlerRLocked{}, kr) + lh.RUnlock() + if right-left > 0 { + hasOverlap = true + break + } + } + cd.elog.LazyPrintf("Key range overlaps with lower levels: %v", hasOverlap) + } + + // Try to collect stats so that we can inform value log about GC. That would help us find which + // value log file should be GCed. + discardStats := make(map[uint32]int64) + updateStats := func(vs y.ValueStruct) { + if vs.Meta&bitValuePointer > 0 { + var vp valuePointer + vp.Decode(vs.Value) + discardStats[vp.Fid] += int64(vp.Len) + } + } + + // Create iterators across all the tables involved first. + var iters []y.Iterator + if l == 0 { + iters = appendIteratorsReversed(iters, topTables, false) + } else { + y.AssertTrue(len(topTables) == 1) + iters = []y.Iterator{topTables[0].NewIterator(false)} + } + + // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap. + iters = append(iters, table.NewConcatIterator(botTables, false)) + it := y.NewMergeIterator(iters, false) + defer it.Close() // Important to close the iterator to do ref counting. + + it.Rewind() + + // Pick a discard ts, so we can discard versions below this ts. We should + // never discard any versions starting from above this timestamp, because + // that would affect the snapshot view guarantee provided by transactions. + discardTs := s.kv.orc.discardAtOrBelow() + + // Start generating new tables. + type newTableResult struct { + table *table.Table + err error + } + resultCh := make(chan newTableResult) + var numBuilds, numVersions int + var lastKey, skipKey []byte + for it.Valid() { + timeStart := time.Now() + builder := table.NewTableBuilder() + var numKeys, numSkips uint64 + for ; it.Valid(); it.Next() { + // See if we need to skip this key. 
+ if len(skipKey) > 0 { + if y.SameKey(it.Key(), skipKey) { + numSkips++ + updateStats(it.Value()) + continue + } else { + skipKey = skipKey[:0] + } + } + + if !y.SameKey(it.Key(), lastKey) { + if builder.ReachedCapacity(s.kv.opt.MaxTableSize) { + // Only break if we are on a different key, and have reached capacity. We want + // to ensure that all versions of the key are stored in the same sstable, and + // not divided across multiple tables at the same level. + break + } + lastKey = y.SafeCopy(lastKey, it.Key()) + numVersions = 0 + } + + vs := it.Value() + version := y.ParseTs(it.Key()) + if version <= discardTs { + // Keep track of the number of versions encountered for this key. Only consider the + // versions which are below the minReadTs, otherwise, we might end up discarding the + // only valid version for a running transaction. + numVersions++ + lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 + if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) || + numVersions > s.kv.opt.NumVersionsToKeep || + lastValidVersion { + // If this version of the key is deleted or expired, skip all the rest of the + // versions. Ensure that we're only removing versions below readTs. + skipKey = y.SafeCopy(skipKey, it.Key()) + + if lastValidVersion { + // Add this key. We have set skipKey, so the following key versions + // would be skipped. + } else if hasOverlap { + // If this key range has overlap with lower levels, then keep the deletion + // marker with the latest version, discarding the rest. We have set skipKey, + // so the following key versions would be skipped. + } else { + // If no overlap, we can skip all the versions, by continuing here. + numSkips++ + updateStats(vs) + continue // Skip adding this key. + } + } + } + numKeys++ + y.Check(builder.Add(it.Key(), it.Value())) + } + // It was true that it.Valid() at least once in the loop above, which means we + // called Add() at least once, and builder is not Empty(). + cd.elog.LazyPrintf("Added %d keys. Skipped %d keys.", numKeys, numSkips) + cd.elog.LazyPrintf("LOG Compact. Iteration took: %v\n", time.Since(timeStart)) + if !builder.Empty() { + numBuilds++ + fileID := s.reserveFileID() + go func(builder *table.Builder) { + defer builder.Close() + + fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true) + if err != nil { + resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)} + return + } + + if _, err := fd.Write(builder.Finish()); err != nil { + resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)} + return + } + + tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil) + // decrRef is added below. + resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())} + }(builder) + } + } + + newTables := make([]*table.Table, 0, 20) + // Wait for all table builders to finish. + var firstErr error + for x := 0; x < numBuilds; x++ { + res := <-resultCh + newTables = append(newTables, res.table) + if firstErr == nil { + firstErr = res.err + } + } + + if firstErr == nil { + // Ensure created files' directory entries are visible. We don't mind the extra latency + // from not doing this ASAP after all file creation has finished because this is a + // background operation. + firstErr = syncDir(s.kv.opt.Dir) + } + + if firstErr != nil { + // An error happened. Delete all the newly created table files (by calling DecrRef + // -- we're the only holders of a ref). 
+ for j := 0; j < numBuilds; j++ { + if newTables[j] != nil { + newTables[j].DecrRef() + } + } + errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd) + return nil, nil, errorReturn + } + + sort.Slice(newTables, func(i, j int) bool { + return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0 + }) + s.kv.vlog.updateGCStats(discardStats) + cd.elog.LazyPrintf("Discard stats: %v", discardStats) + return newTables, func() error { return decrRefs(newTables) }, nil +} + +func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet { + changes := []*pb.ManifestChange{} + for _, table := range newTables { + changes = append(changes, + newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum)) + } + for _, table := range cd.top { + changes = append(changes, newDeleteChange(table.ID())) + } + for _, table := range cd.bot { + changes = append(changes, newDeleteChange(table.ID())) + } + return pb.ManifestChangeSet{Changes: changes} +} + +type compactDef struct { + elog trace.Trace + + thisLevel *levelHandler + nextLevel *levelHandler + + top []*table.Table + bot []*table.Table + + thisRange keyRange + nextRange keyRange + + thisSize int64 +} + +func (cd *compactDef) lockLevels() { + cd.thisLevel.RLock() + cd.nextLevel.RLock() +} + +func (cd *compactDef) unlockLevels() { + cd.nextLevel.RUnlock() + cd.thisLevel.RUnlock() +} + +func (s *levelsController) fillTablesL0(cd *compactDef) bool { + cd.lockLevels() + defer cd.unlockLevels() + + cd.top = make([]*table.Table, len(cd.thisLevel.tables)) + copy(cd.top, cd.thisLevel.tables) + if len(cd.top) == 0 { + return false + } + cd.thisRange = infRange + + kr := getKeyRange(cd.top) + left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr) + cd.bot = make([]*table.Table, right-left) + copy(cd.bot, cd.nextLevel.tables[left:right]) + + if len(cd.bot) == 0 { + cd.nextRange = kr + } else { + cd.nextRange = getKeyRange(cd.bot) + } + + if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) { + return false + } + + return true +} + +func (s *levelsController) fillTables(cd *compactDef) bool { + cd.lockLevels() + defer cd.unlockLevels() + + tbls := make([]*table.Table, len(cd.thisLevel.tables)) + copy(tbls, cd.thisLevel.tables) + if len(tbls) == 0 { + return false + } + + // Find the biggest table, and compact that first. + // TODO: Try other table picking strategies. + sort.Slice(tbls, func(i, j int) bool { + return tbls[i].Size() > tbls[j].Size() + }) + + for _, t := range tbls { + cd.thisSize = t.Size() + cd.thisRange = keyRange{ + // We pick all the versions of the smallest and the biggest key. + left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64), + // Note that version zero would be the rightmost key. 
+ right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0), + } + if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) { + continue + } + cd.top = []*table.Table{t} + left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange) + + cd.bot = make([]*table.Table, right-left) + copy(cd.bot, cd.nextLevel.tables[left:right]) + + if len(cd.bot) == 0 { + cd.bot = []*table.Table{} + cd.nextRange = cd.thisRange + if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) { + continue + } + return true + } + cd.nextRange = getKeyRange(cd.bot) + + if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) { + continue + } + if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) { + continue + } + return true + } + return false +} + +func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) { + timeStart := time.Now() + + thisLevel := cd.thisLevel + nextLevel := cd.nextLevel + + // Table should never be moved directly between levels, always be rewritten to allow discarding + // invalid versions. + + newTables, decr, err := s.compactBuildTables(l, cd) + if err != nil { + return err + } + defer func() { + // Only assign to err, if it's not already nil. + if decErr := decr(); err == nil { + err = decErr + } + }() + changeSet := buildChangeSet(&cd, newTables) + + // We write to the manifest _before_ we delete files (and after we created files) + if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil { + return err + } + + // See comment earlier in this function about the ordering of these ops, and the order in which + // we access levels when reading. + if err := nextLevel.replaceTables(newTables); err != nil { + return err + } + if err := thisLevel.deleteTables(cd.top); err != nil { + return err + } + + // Note: For level 0, while doCompact is running, it is possible that new tables are added. + // However, the tables are added only to the end, so it is ok to just delete the first table. + + cd.elog.LazyPrintf("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n", + l, l+1, len(cd.top)+len(cd.bot), len(newTables), time.Since(timeStart)) + return nil +} + +var errFillTables = errors.New("Unable to fill tables") + +// doCompact picks some table on level l and compacts it away to the next level. +func (s *levelsController) doCompact(p compactionPriority) error { + l := p.level + y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check. + + cd := compactDef{ + elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"), + thisLevel: s.levels[l], + nextLevel: s.levels[l+1], + } + cd.elog.SetMaxEvents(100) + defer cd.elog.Finish() + + cd.elog.LazyPrintf("Got compaction priority: %+v", p) + + // While picking tables to be compacted, both levels' tables are expected to + // remain unchanged. + if l == 0 { + if !s.fillTablesL0(&cd) { + cd.elog.LazyPrintf("fillTables failed for level: %d\n", l) + return errFillTables + } + + } else { + if !s.fillTables(&cd) { + cd.elog.LazyPrintf("fillTables failed for level: %d\n", l) + return errFillTables + } + } + defer s.cstatus.delete(cd) // Remove the ranges from compaction status. + + cd.elog.LazyPrintf("Running for level: %d\n", cd.thisLevel.level) + s.cstatus.toLog(cd.elog) + if err := s.runCompactDef(l, cd); err != nil { + // This compaction couldn't be done successfully. 
+ cd.elog.LazyPrintf("\tLOG Compact FAILED with error: %+v: %+v", err, cd) + return err + } + + s.cstatus.toLog(cd.elog) + cd.elog.LazyPrintf("Compaction for level: %d DONE", cd.thisLevel.level) + return nil +} + +func (s *levelsController) addLevel0Table(t *table.Table) error { + // We update the manifest _before_ the table becomes part of a levelHandler, because at that + // point it could get used in some compaction. This ensures the manifest file gets updated in + // the proper order. (That means this update happens before that of some compaction which + // deletes the table.) + err := s.kv.manifest.addChanges([]*pb.ManifestChange{ + newCreateChange(t.ID(), 0, t.Checksum), + }) + if err != nil { + return err + } + + for !s.levels[0].tryAddLevel0Table(t) { + // Stall. Make sure all levels are healthy before we unstall. + var timeStart time.Time + { + s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled)) + s.cstatus.RLock() + for i := 0; i < s.kv.opt.MaxLevels; i++ { + s.elog.Printf("level=%d. Status=%s Size=%d\n", + i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize()) + } + s.cstatus.RUnlock() + timeStart = time.Now() + } + // Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we + // will very quickly fill up level 0 again and if the compaction strategy favors level 0, + // then level 1 is going to super full. + for i := 0; ; i++ { + // Passing 0 for delSize to compactable means we're treating incomplete compactions as + // not having finished -- we wait for them to finish. Also, it's crucial this behavior + // replicates pickCompactLevels' behavior in computing compactability in order to + // guarantee progress. + if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) { + break + } + time.Sleep(10 * time.Millisecond) + if i%100 == 0 { + prios := s.pickCompactLevels() + s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios) + i = 0 + } + } + { + s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart)) + lastUnstalled = time.Now() + } + } + + return nil +} + +func (s *levelsController) close() error { + err := s.cleanupLevels() + return errors.Wrap(err, "levelsController.Close") +} + +// get returns the found value if any. If not found, we return nil. +func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) { + // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated + // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could + // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do + // parallelize this, we will need to call the h.RLock() function by increasing order of level + // number.) + version := y.ParseTs(key) + for _, h := range s.levels { + vs, err := h.get(key) // Calls h.RLock() and h.RUnlock(). + if err != nil { + return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key) + } + if vs.Value == nil && vs.Meta == 0 { + continue + } + if maxVs == nil || vs.Version == version { + return vs, nil + } + if maxVs.Version < vs.Version { + *maxVs = vs + } + } + if maxVs != nil { + return *maxVs, nil + } + return y.ValueStruct{}, nil +} + +func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator { + for i := len(th) - 1; i >= 0; i-- { + // This will increment the reference of the table handler. 
+ out = append(out, th[i].NewIterator(reversed)) + } + return out +} + +// appendIterators appends iterators to an array of iterators, for merging. +// Note: This obtains references for the table handlers. Remember to close these iterators. +func (s *levelsController) appendIterators( + iters []y.Iterator, opt *IteratorOptions) []y.Iterator { + // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing + // data when there's a compaction. + for _, level := range s.levels { + iters = level.appendIterators(iters, opt) + } + return iters +} + +// TableInfo represents the information about a table. +type TableInfo struct { + ID uint64 + Level int + Left []byte + Right []byte +} + +func (s *levelsController) getTableInfo() (result []TableInfo) { + for _, l := range s.levels { + for _, t := range l.tables { + info := TableInfo{ + ID: t.ID(), + Level: l.level, + Left: t.Smallest(), + Right: t.Biggest(), + } + result = append(result, info) + } + } + sort.Slice(result, func(i, j int) bool { + if result[i].Level != result[j].Level { + return result[i].Level < result[j].Level + } + return result[i].ID < result[j].ID + }) + return +} diff --git a/vendor/github.com/dgraph-io/badger/logger.go b/vendor/github.com/dgraph-io/badger/logger.go new file mode 100644 index 00000000..c8e64c21 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/logger.go @@ -0,0 +1,72 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "log" + "os" +) + +// Logger is implemented by any logging system that is used for standard logs. +type Logger interface { + Errorf(string, ...interface{}) + Infof(string, ...interface{}) + Warningf(string, ...interface{}) +} + +// Errorf logs an ERROR log message to the logger specified in opts or to the +// global logger if no logger is specified in opts. +func (opt *Options) Errorf(format string, v ...interface{}) { + if opt.Logger == nil { + return + } + opt.Logger.Errorf(format, v...) +} + +// Infof logs an INFO message to the logger specified in opts. +func (opt *Options) Infof(format string, v ...interface{}) { + if opt.Logger == nil { + return + } + opt.Logger.Infof(format, v...) +} + +// Warningf logs a WARNING message to the logger specified in opts. +func (opt *Options) Warningf(format string, v ...interface{}) { + if opt.Logger == nil { + return + } + opt.Logger.Warningf(format, v...) +} + +type defaultLog struct { + *log.Logger +} + +var defaultLogger = &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags)} + +func (l *defaultLog) Errorf(f string, v ...interface{}) { + l.Printf("ERROR: "+f, v...) +} + +func (l *defaultLog) Infof(f string, v ...interface{}) { + l.Printf("INFO: "+f, v...) +} + +func (l *defaultLog) Warningf(f string, v ...interface{}) { + l.Printf("WARNING: "+f, v...) 
+} diff --git a/vendor/github.com/dgraph-io/badger/logger_test.go b/vendor/github.com/dgraph-io/badger/logger_test.go new file mode 100644 index 00000000..321c379a --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/logger_test.go @@ -0,0 +1,67 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +type mockLogger struct { + output string +} + +func (l *mockLogger) Errorf(f string, v ...interface{}) { + l.output = fmt.Sprintf("ERROR: "+f, v...) +} + +func (l *mockLogger) Infof(f string, v ...interface{}) { + l.output = fmt.Sprintf("INFO: "+f, v...) +} + +func (l *mockLogger) Warningf(f string, v ...interface{}) { + l.output = fmt.Sprintf("WARNING: "+f, v...) +} + +// Test that the DB-specific log is used instead of the global log. +func TestDbLog(t *testing.T) { + l := &mockLogger{} + opt := Options{Logger: l} + + opt.Errorf("test") + require.Equal(t, "ERROR: test", l.output) + opt.Infof("test") + require.Equal(t, "INFO: test", l.output) + opt.Warningf("test") + require.Equal(t, "WARNING: test", l.output) +} + +// Test that the global logger is used when no logger is specified in Options. +func TestNoDbLog(t *testing.T) { + l := &mockLogger{} + opt := Options{} + opt.Logger = l + + opt.Errorf("test") + require.Equal(t, "ERROR: test", l.output) + opt.Infof("test") + require.Equal(t, "INFO: test", l.output) + opt.Warningf("test") + require.Equal(t, "WARNING: test", l.output) +} diff --git a/vendor/github.com/dgraph-io/badger/managed_db.go b/vendor/github.com/dgraph-io/badger/managed_db.go new file mode 100644 index 00000000..4de226ae --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/managed_db.go @@ -0,0 +1,68 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +// OpenManaged returns a new DB, which allows more control over setting +// transaction timestamps, aka managed mode. +// +// This is only useful for databases built on top of Badger (like Dgraph), and +// can be ignored by most users. +func OpenManaged(opts Options) (*DB, error) { + opts.managedTxns = true + return Open(opts) +} + +// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the +// provided read timestamp. +// +// This is only useful for databases built on top of Badger (like Dgraph), and +// can be ignored by most users. 
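// ----------------------------------------------------------------------------
// Editorial note (illustration only, not part of the vendored badger source):
// a minimal sketch of how managed mode is meant to be driven, using only the
// entry points in this file (OpenManaged above; NewTransactionAt, CommitAt and
// SetDiscardTs below). The directory and timestamps are made up for the
// example.
//
//   opts := badger.DefaultOptions
//   opts.Dir, opts.ValueDir = "/tmp/badger-managed", "/tmp/badger-managed"
//   db, err := badger.OpenManaged(opts)
//   if err != nil { /* handle error */ }
//   defer db.Close()
//
//   txn := db.NewTransactionAt(10, true) // read snapshot at ts=10
//   _ = txn.Set([]byte("key"), []byte("value"))
//   _ = txn.CommitAt(11, nil)            // write becomes visible at ts=11
//
//   db.SetDiscardTs(5)                   // versions at or below ts=5 may be GCed
// ----------------------------------------------------------------------------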
+func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn { + if !db.opt.managedTxns { + panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.") + } + txn := db.newTransaction(update, true) + txn.readTs = readTs + return txn +} + +// CommitAt commits the transaction, following the same logic as Commit(), but +// at the given commit timestamp. This will panic if not used with managed transactions. +// +// This is only useful for databases built on top of Badger (like Dgraph), and +// can be ignored by most users. +func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error { + if !txn.db.opt.managedTxns { + panic("Cannot use CommitAt with managedDB=false. Use Commit instead.") + } + txn.commitTs = commitTs + if callback == nil { + return txn.Commit() + } + txn.CommitWith(callback) + return nil +} + +// SetDiscardTs sets a timestamp at or below which, any invalid or deleted +// versions can be discarded from the LSM tree, and thence from the value log to +// reclaim disk space. Can only be used with managed transactions. +func (db *DB) SetDiscardTs(ts uint64) { + if !db.opt.managedTxns { + panic("Cannot use SetDiscardTs with managedDB=false.") + } + db.orc.setDiscardTs(ts) +} diff --git a/vendor/github.com/dgraph-io/badger/managed_db_test.go b/vendor/github.com/dgraph-io/badger/managed_db_test.go new file mode 100644 index 00000000..dbb84c82 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/managed_db_test.go @@ -0,0 +1,353 @@ +package badger + +import ( + "io/ioutil" + "math" + "math/rand" + "os" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +func val(large bool) []byte { + var buf []byte + if large { + buf = make([]byte, 8192) + } else { + buf = make([]byte, 16) + } + rand.Read(buf) + return buf +} + +func numKeys(db *DB) int { + var count int + err := db.View(func(txn *Txn) error { + itr := txn.NewIterator(DefaultIteratorOptions) + defer itr.Close() + + for itr.Rewind(); itr.Valid(); itr.Next() { + count++ + } + return nil + }) + y.Check(err) + return count +} + +func numKeysManaged(db *DB, readTs uint64) int { + txn := db.NewTransactionAt(readTs, false) + defer txn.Discard() + + itr := txn.NewIterator(DefaultIteratorOptions) + defer itr.Close() + + var count int + for itr.Rewind(); itr.Valid(); itr.Next() { + count++ + } + return count +} + +func TestDropAllManaged(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.managedTxns = true + opts.ValueLogFileSize = 5 << 20 + db, err := Open(opts) + require.NoError(t, err) + + N := uint64(10000) + populate := func(db *DB, start uint64) { + var wg sync.WaitGroup + for i := start; i < start+N; i++ { + wg.Add(1) + txn := db.NewTransactionAt(math.MaxUint64, true) + require.NoError(t, txn.Set([]byte(key("key", int(i))), val(true))) + require.NoError(t, txn.CommitAt(uint64(i), func(err error) { + require.NoError(t, err) + wg.Done() + })) + } + wg.Wait() + } + + populate(db, N) + require.Equal(t, int(N), numKeysManaged(db, math.MaxUint64)) + + require.NoError(t, db.DropAll()) + require.NoError(t, db.DropAll()) // Just call it twice, for fun. + require.Equal(t, 0, numKeysManaged(db, math.MaxUint64)) + + // Check that we can still write to mdb, and using lower timestamps. 
+ populate(db, 1) + require.Equal(t, int(N), numKeysManaged(db, math.MaxUint64)) + db.Close() + + // Ensure that value log is correctly replayed, that we are preserving badgerHead. + opts.managedTxns = true + db2, err := Open(opts) + require.NoError(t, err) + require.Equal(t, int(N), numKeysManaged(db2, math.MaxUint64)) + db2.Close() +} + +func TestDropAll(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.ValueLogFileSize = 5 << 20 + db, err := Open(opts) + require.NoError(t, err) + + N := uint64(10000) + populate := func(db *DB) { + writer := db.NewWriteBatch() + for i := uint64(0); i < N; i++ { + require.NoError(t, writer.Set([]byte(key("key", int(i))), val(true), 0)) + } + require.NoError(t, writer.Flush()) + } + + populate(db) + require.Equal(t, int(N), numKeys(db)) + + require.NoError(t, db.DropAll()) + require.Equal(t, 0, numKeys(db)) + + // Check that we can still write to mdb, and using lower timestamps. + populate(db) + require.Equal(t, int(N), numKeys(db)) + db.Close() + + // Ensure that value log is correctly replayed. + db2, err := Open(opts) + require.NoError(t, err) + require.Equal(t, int(N), numKeys(db2)) + db2.Close() +} + +func TestDropAllTwice(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.ValueLogFileSize = 5 << 20 + db, err := Open(opts) + require.NoError(t, err) + + N := uint64(10000) + populate := func(db *DB) { + writer := db.NewWriteBatch() + for i := uint64(0); i < N; i++ { + require.NoError(t, writer.Set([]byte(key("key", int(i))), val(true), 0)) + } + require.NoError(t, writer.Flush()) + } + + populate(db) + require.Equal(t, int(N), numKeys(db)) + + require.NoError(t, db.DropAll()) + require.Equal(t, 0, numKeys(db)) + + // Call DropAll again. + require.NoError(t, db.DropAll()) +} + +func TestDropAllWithPendingTxn(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.ValueLogFileSize = 5 << 20 + db, err := Open(opts) + require.NoError(t, err) + + N := uint64(10000) + populate := func(db *DB) { + writer := db.NewWriteBatch() + for i := uint64(0); i < N; i++ { + require.NoError(t, writer.Set([]byte(key("key", int(i))), val(true), 0)) + } + require.NoError(t, writer.Flush()) + } + + populate(db) + require.Equal(t, int(N), numKeys(db)) + + txn := db.NewTransaction(true) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + itr := txn.NewIterator(DefaultIteratorOptions) + defer itr.Close() + + var keys []string + for { + var count int + for itr.Rewind(); itr.Valid(); itr.Next() { + count++ + item := itr.Item() + keys = append(keys, string(item.KeyCopy(nil))) + _, err := item.ValueCopy(nil) + if err != nil { + t.Logf("Got error during value copy: %v", err) + return + } + } + t.Logf("Got number of keys: %d\n", count) + for _, key := range keys { + item, err := txn.Get([]byte(key)) + if err != nil { + t.Logf("Got error during key lookup: %v", err) + return + } + if _, err := item.ValueCopy(nil); err != nil { + t.Logf("Got error during second value copy: %v", err) + return + } + } + } + }() + // Do not cancel txn. 
+ + go func() { + time.Sleep(2 * time.Second) + require.NoError(t, db.DropAll()) + }() + wg.Wait() +} + +func TestDropReadOnly(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.ValueLogFileSize = 5 << 20 + db, err := Open(opts) + require.NoError(t, err) + N := uint64(1000) + populate := func(db *DB) { + writer := db.NewWriteBatch() + for i := uint64(0); i < N; i++ { + require.NoError(t, writer.Set([]byte(key("key", int(i))), val(true), 0)) + } + require.NoError(t, writer.Flush()) + } + + populate(db) + require.Equal(t, int(N), numKeys(db)) + require.NoError(t, db.Close()) + + opts.ReadOnly = true + db2, err := Open(opts) + // acquireDirectoryLock returns ErrWindowsNotSupported on Windows. It can be ignored safely. + if runtime.GOOS == "windows" { + require.Equal(t, err, ErrWindowsNotSupported) + } else { + require.NoError(t, err) + } + require.Panics(t, func() { db2.DropAll() }) +} + +func TestWriteAfterClose(t *testing.T) { + dir, err := ioutil.TempDir(".", "badger-test") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.ValueLogFileSize = 5 << 20 + db, err := Open(opts) + require.NoError(t, err) + N := uint64(1000) + populate := func(db *DB) { + writer := db.NewWriteBatch() + for i := uint64(0); i < N; i++ { + require.NoError(t, writer.Set([]byte(key("key", int(i))), val(true), 0)) + } + require.NoError(t, writer.Flush()) + } + + populate(db) + require.Equal(t, int(N), numKeys(db)) + require.NoError(t, db.Close()) + err = db.Update(func(txn *Txn) error { + return txn.Set([]byte("a"), []byte("b")) + }) + require.Equal(t, ErrBlockedWrites, err) +} + +func TestDropAllRace(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opts := getTestOptions(dir) + opts.managedTxns = true + db, err := Open(opts) + require.NoError(t, err) + + N := 10000 + // Start a goroutine to keep trying to write to DB while DropAll happens. + closer := y.NewCloser(1) + go func() { + defer closer.Done() + ticker := time.NewTicker(time.Millisecond) + defer ticker.Stop() + + i := N + 1 // Writes would happen above N. + var errors int32 + for { + select { + case <-ticker.C: + i++ + txn := db.NewTransactionAt(math.MaxUint64, true) + require.NoError(t, txn.Set([]byte(key("key", i)), val(false))) + if err := txn.CommitAt(uint64(i), func(err error) { + if err != nil { + atomic.AddInt32(&errors, 1) + } + }); err != nil { + atomic.AddInt32(&errors, 1) + } + case <-closer.HasBeenClosed(): + // The following causes a data race. + // t.Logf("i: %d. Number of (expected) write errors: %d.\n", i, errors) + return + } + } + }() + + var wg sync.WaitGroup + for i := 1; i <= N; i++ { + wg.Add(1) + txn := db.NewTransactionAt(math.MaxUint64, true) + require.NoError(t, txn.Set([]byte(key("key", i)), val(false))) + require.NoError(t, txn.CommitAt(uint64(i), func(err error) { + require.NoError(t, err) + wg.Done() + })) + } + wg.Wait() + + before := numKeysManaged(db, math.MaxUint64) + require.True(t, before > N) + + require.NoError(t, db.DropAll()) + closer.SignalAndWait() + + after := numKeysManaged(db, math.MaxUint64) + t.Logf("Before: %d. 
After dropall: %d\n", before, after) + require.True(t, after < before) + db.Close() +} diff --git a/vendor/github.com/dgraph-io/badger/manifest.go b/vendor/github.com/dgraph-io/badger/manifest.go new file mode 100644 index 00000000..06a5b96b --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/manifest.go @@ -0,0 +1,436 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// Manifest represents the contents of the MANIFEST file in a Badger store. +// +// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're +// at. +// +// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically, +// and contains a sequence of ManifestChange's (file creations/deletions) which we use to +// reconstruct the manifest at startup. +type Manifest struct { + Levels []levelManifest + Tables map[uint64]TableManifest + + // Contains total number of creation and deletion changes in the manifest -- used to compute + // whether it'd be useful to rewrite the manifest. + Creations int + Deletions int +} + +func createManifest() Manifest { + levels := make([]levelManifest, 0) + return Manifest{ + Levels: levels, + Tables: make(map[uint64]TableManifest), + } +} + +// levelManifest contains information about LSM tree levels +// in the MANIFEST file. +type levelManifest struct { + Tables map[uint64]struct{} // Set of table id's +} + +// TableManifest contains information about a specific level +// in the LSM tree. +type TableManifest struct { + Level uint8 + Checksum []byte +} + +// manifestFile holds the file pointer (and other info) about the manifest file, which is a log +// file we append to. +type manifestFile struct { + fp *os.File + directory string + // We make this configurable so that unit tests can hit rewrite() code quickly + deletionsRewriteThreshold int + + // Guards appends, which includes access to the manifest field. + appendLock sync.Mutex + + // Used to track the current state of the manifest, used when rewriting. + manifest Manifest +} + +const ( + // ManifestFilename is the filename for the manifest file. + ManifestFilename = "MANIFEST" + manifestRewriteFilename = "MANIFEST-REWRITE" + manifestDeletionsRewriteThreshold = 10000 + manifestDeletionsRatio = 10 +) + +// asChanges returns a sequence of changes that could be used to recreate the Manifest in its +// present state. 
+func (m *Manifest) asChanges() []*pb.ManifestChange { + changes := make([]*pb.ManifestChange, 0, len(m.Tables)) + for id, tm := range m.Tables { + changes = append(changes, newCreateChange(id, int(tm.Level), tm.Checksum)) + } + return changes +} + +func (m *Manifest) clone() Manifest { + changeSet := pb.ManifestChangeSet{Changes: m.asChanges()} + ret := createManifest() + y.Check(applyChangeSet(&ret, &changeSet)) + return ret +} + +// openOrCreateManifestFile opens a Badger manifest file if it exists, or creates on if +// one doesn’t. +func openOrCreateManifestFile(dir string, readOnly bool) (ret *manifestFile, result Manifest, err error) { + return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold) +} + +func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (ret *manifestFile, result Manifest, err error) { + path := filepath.Join(dir, ManifestFilename) + var flags uint32 + if readOnly { + flags |= y.ReadOnly + } + fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock. + if err != nil { + if !os.IsNotExist(err) { + return nil, Manifest{}, err + } + if readOnly { + return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db") + } + m := createManifest() + fp, netCreations, err := helpRewrite(dir, &m) + if err != nil { + return nil, Manifest{}, err + } + y.AssertTrue(netCreations == 0) + mf := &manifestFile{ + fp: fp, + directory: dir, + manifest: m.clone(), + deletionsRewriteThreshold: deletionsThreshold, + } + return mf, m, nil + } + + manifest, truncOffset, err := ReplayManifestFile(fp) + if err != nil { + _ = fp.Close() + return nil, Manifest{}, err + } + + if !readOnly { + // Truncate file so we don't have a half-written entry at the end. + if err := fp.Truncate(truncOffset); err != nil { + _ = fp.Close() + return nil, Manifest{}, err + } + } + if _, err = fp.Seek(0, io.SeekEnd); err != nil { + _ = fp.Close() + return nil, Manifest{}, err + } + + mf := &manifestFile{ + fp: fp, + directory: dir, + manifest: manifest.clone(), + deletionsRewriteThreshold: deletionsThreshold, + } + return mf, manifest, nil +} + +func (mf *manifestFile) close() error { + return mf.fp.Close() +} + +// addChanges writes a batch of changes, atomically, to the file. By "atomically" that means when +// we replay the MANIFEST file, we'll either replay all the changes or none of them. (The truth of +// this depends on the filesystem -- some might append garbage data if a system crash happens at +// the wrong time.) +func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error { + changes := pb.ManifestChangeSet{Changes: changesParam} + buf, err := changes.Marshal() + if err != nil { + return err + } + + // Maybe we could use O_APPEND instead (on certain file systems) + mf.appendLock.Lock() + if err := applyChangeSet(&mf.manifest, &changes); err != nil { + mf.appendLock.Unlock() + return err + } + // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care + if mf.manifest.Deletions > mf.deletionsRewriteThreshold && + mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) { + if err := mf.rewrite(); err != nil { + mf.appendLock.Unlock() + return err + } + } else { + var lenCrcBuf [8]byte + binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf))) + binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable)) + buf = append(lenCrcBuf[:], buf...) 
+ if _, err := mf.fp.Write(buf); err != nil { + mf.appendLock.Unlock() + return err + } + } + + mf.appendLock.Unlock() + return mf.fp.Sync() +} + +// Has to be 4 bytes. The value can never change, ever, anyway. +var magicText = [4]byte{'B', 'd', 'g', 'r'} + +// The magic version number. +const magicVersion = 4 + +func helpRewrite(dir string, m *Manifest) (*os.File, int, error) { + rewritePath := filepath.Join(dir, manifestRewriteFilename) + // We explicitly sync. + fp, err := y.OpenTruncFile(rewritePath, false) + if err != nil { + return nil, 0, err + } + + buf := make([]byte, 8) + copy(buf[0:4], magicText[:]) + binary.BigEndian.PutUint32(buf[4:8], magicVersion) + + netCreations := len(m.Tables) + changes := m.asChanges() + set := pb.ManifestChangeSet{Changes: changes} + + changeBuf, err := set.Marshal() + if err != nil { + fp.Close() + return nil, 0, err + } + var lenCrcBuf [8]byte + binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf))) + binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable)) + buf = append(buf, lenCrcBuf[:]...) + buf = append(buf, changeBuf...) + if _, err := fp.Write(buf); err != nil { + fp.Close() + return nil, 0, err + } + if err := fp.Sync(); err != nil { + fp.Close() + return nil, 0, err + } + + // In Windows the files should be closed before doing a Rename. + if err = fp.Close(); err != nil { + return nil, 0, err + } + manifestPath := filepath.Join(dir, ManifestFilename) + if err := os.Rename(rewritePath, manifestPath); err != nil { + return nil, 0, err + } + fp, err = y.OpenExistingFile(manifestPath, 0) + if err != nil { + return nil, 0, err + } + if _, err := fp.Seek(0, io.SeekEnd); err != nil { + fp.Close() + return nil, 0, err + } + if err := syncDir(dir); err != nil { + fp.Close() + return nil, 0, err + } + + return fp, netCreations, nil +} + +// Must be called while appendLock is held. +func (mf *manifestFile) rewrite() error { + // In Windows the files should be closed before doing a Rename. + if err := mf.fp.Close(); err != nil { + return err + } + fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest) + if err != nil { + return err + } + mf.fp = fp + mf.manifest.Creations = netCreations + mf.manifest.Deletions = 0 + + return nil +} + +type countingReader struct { + wrapped *bufio.Reader + count int64 +} + +func (r *countingReader) Read(p []byte) (n int, err error) { + n, err = r.wrapped.Read(p) + r.count += int64(n) + return +} + +func (r *countingReader) ReadByte() (b byte, err error) { + b, err = r.wrapped.ReadByte() + if err == nil { + r.count++ + } + return +} + +var ( + errBadMagic = errors.New("manifest has bad magic") +) + +// ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one +// immutable copy and one mutable copy of the manifest. Easiest way is to construct two of them.) +// Also, returns the last offset after a completely read manifest entry -- the file must be +// truncated at that point before further appends are made (if there is a partial entry after +// that). In normal conditions, truncOffset is the file size. 
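// ----------------------------------------------------------------------------
// Editorial note (illustration only, not part of the vendored badger source):
// the on-disk MANIFEST layout that ReplayManifestFile below walks, pieced
// together from helpRewrite and addChanges above:
//
//   offset 0: 4-byte magic text "Bdgr"
//   offset 4: 4-byte big-endian magicVersion (4 in this copy)
//   then, repeated until EOF or a bad checksum:
//     4-byte big-endian length of the marshalled pb.ManifestChangeSet
//     4-byte big-endian CRC32 (Castagnoli table) of that payload
//     the marshalled pb.ManifestChangeSet bytes
//
// A short or checksum-mismatched trailing record is treated as a partial
// write: replay stops there and the caller truncates the file at the returned
// offset before appending further changes.
// ----------------------------------------------------------------------------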
+func ReplayManifestFile(fp *os.File) (ret Manifest, truncOffset int64, err error) { + r := countingReader{wrapped: bufio.NewReader(fp)} + + var magicBuf [8]byte + if _, err := io.ReadFull(&r, magicBuf[:]); err != nil { + return Manifest{}, 0, errBadMagic + } + if !bytes.Equal(magicBuf[0:4], magicText[:]) { + return Manifest{}, 0, errBadMagic + } + version := binary.BigEndian.Uint32(magicBuf[4:8]) + if version != magicVersion { + return Manifest{}, 0, + fmt.Errorf("manifest has unsupported version: %d (we support %d)", version, magicVersion) + } + + build := createManifest() + var offset int64 + for { + offset = r.count + var lenCrcBuf [8]byte + _, err := io.ReadFull(&r, lenCrcBuf[:]) + if err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + break + } + return Manifest{}, 0, err + } + length := binary.BigEndian.Uint32(lenCrcBuf[0:4]) + var buf = make([]byte, length) + if _, err := io.ReadFull(&r, buf); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + break + } + return Manifest{}, 0, err + } + if crc32.Checksum(buf, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(lenCrcBuf[4:8]) { + break + } + + var changeSet pb.ManifestChangeSet + if err := changeSet.Unmarshal(buf); err != nil { + return Manifest{}, 0, err + } + + if err := applyChangeSet(&build, &changeSet); err != nil { + return Manifest{}, 0, err + } + } + + return build, offset, err +} + +func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error { + switch tc.Op { + case pb.ManifestChange_CREATE: + if _, ok := build.Tables[tc.Id]; ok { + return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id) + } + build.Tables[tc.Id] = TableManifest{ + Level: uint8(tc.Level), + Checksum: append([]byte{}, tc.Checksum...), + } + for len(build.Levels) <= int(tc.Level) { + build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})}) + } + build.Levels[tc.Level].Tables[tc.Id] = struct{}{} + build.Creations++ + case pb.ManifestChange_DELETE: + tm, ok := build.Tables[tc.Id] + if !ok { + return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id) + } + delete(build.Levels[tm.Level].Tables, tc.Id) + delete(build.Tables, tc.Id) + build.Deletions++ + default: + return fmt.Errorf("MANIFEST file has invalid manifestChange op") + } + return nil +} + +// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is +// just plain broken. +func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error { + for _, change := range changeSet.Changes { + if err := applyManifestChange(build, change); err != nil { + return err + } + } + return nil +} + +func newCreateChange(id uint64, level int, checksum []byte) *pb.ManifestChange { + return &pb.ManifestChange{ + Id: id, + Op: pb.ManifestChange_CREATE, + Level: uint32(level), + Checksum: checksum, + } +} + +func newDeleteChange(id uint64) *pb.ManifestChange { + return &pb.ManifestChange{ + Id: id, + Op: pb.ManifestChange_DELETE, + } +} diff --git a/vendor/github.com/dgraph-io/badger/manifest_test.go b/vendor/github.com/dgraph-io/badger/manifest_test.go new file mode 100644 index 00000000..7bf55a68 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/manifest_test.go @@ -0,0 +1,244 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "sort" + "testing" + + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" + + "github.com/stretchr/testify/require" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +func TestManifestBasic(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := getTestOptions(dir) + { + kv, err := Open(opt) + require.NoError(t, err) + n := 5000 + for i := 0; i < n; i++ { + if (i % 10000) == 0 { + fmt.Printf("Putting i=%d\n", i) + } + k := []byte(fmt.Sprintf("%16x", rand.Int63())) + txnSet(t, kv, k, k, 0x00) + } + txnSet(t, kv, []byte("testkey"), []byte("testval"), 0x05) + kv.validate() + require.NoError(t, kv.Close()) + } + + kv, err := Open(opt) + require.NoError(t, err) + + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get([]byte("testkey")) + require.NoError(t, err) + require.EqualValues(t, "testval", string(getItemValue(t, item))) + require.EqualValues(t, byte(0x05), item.UserMeta()) + return nil + })) + require.NoError(t, kv.Close()) +} + +func helpTestManifestFileCorruption(t *testing.T, off int64, errorContent string) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := getTestOptions(dir) + { + kv, err := Open(opt) + require.NoError(t, err) + require.NoError(t, kv.Close()) + } + fp, err := os.OpenFile(filepath.Join(dir, ManifestFilename), os.O_RDWR, 0) + require.NoError(t, err) + // Mess with magic value or version to force error + _, err = fp.WriteAt([]byte{'X'}, off) + require.NoError(t, err) + require.NoError(t, fp.Close()) + kv, err := Open(opt) + defer func() { + if kv != nil { + kv.Close() + } + }() + require.Error(t, err) + require.Contains(t, err.Error(), errorContent) +} + +func TestManifestMagic(t *testing.T) { + helpTestManifestFileCorruption(t, 3, "bad magic") +} + +func TestManifestVersion(t *testing.T) { + helpTestManifestFileCorruption(t, 4, "unsupported version") +} + +func key(prefix string, i int) string { + return prefix + fmt.Sprintf("%04d", i) +} + +func buildTestTable(t *testing.T, prefix string, n int) *os.File { + y.AssertTrue(n <= 10000) + keyValues := make([][]string, n) + for i := 0; i < n; i++ { + k := key(prefix, i) + v := fmt.Sprintf("%d", i) + keyValues[i] = []string{k, v} + } + return buildTable(t, keyValues) +} + +// TODO - Move these to somewhere where table package can also use it. +// keyValues is n by 2 where n is number of pairs. +func buildTable(t *testing.T, keyValues [][]string) *os.File { + b := table.NewTableBuilder() + defer b.Close() + // TODO: Add test for file garbage collection here. No files should be left after the tests here. 
+ + filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63()) + f, err := y.OpenSyncedFile(filename, true) + if t != nil { + require.NoError(t, err) + } else { + y.Check(err) + } + + sort.Slice(keyValues, func(i, j int) bool { + return keyValues[i][0] < keyValues[j][0] + }) + for _, kv := range keyValues { + y.AssertTrue(len(kv) == 2) + err := b.Add(y.KeyWithTs([]byte(kv[0]), 10), y.ValueStruct{ + Value: []byte(kv[1]), + Meta: 'A', + UserMeta: 0, + }) + if t != nil { + require.NoError(t, err) + } else { + y.Check(err) + } + } + f.Write(b.Finish()) + f.Close() + f, _ = y.OpenSyncedFile(filename, true) + return f +} + +func TestOverlappingKeyRangeError(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := DefaultOptions + opt.Dir = dir + opt.ValueDir = dir + kv, err := Open(opt) + require.NoError(t, err) + + lh0 := newLevelHandler(kv, 0) + lh1 := newLevelHandler(kv, 1) + f := buildTestTable(t, "k", 2) + t1, err := table.OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer t1.DecrRef() + + done := lh0.tryAddLevel0Table(t1) + require.Equal(t, true, done) + + cd := compactDef{ + thisLevel: lh0, + nextLevel: lh1, + elog: trace.New("Badger", "Compact"), + } + + manifest := createManifest() + lc, err := newLevelsController(kv, &manifest) + require.NoError(t, err) + done = lc.fillTablesL0(&cd) + require.Equal(t, true, done) + lc.runCompactDef(0, cd) + + f = buildTestTable(t, "l", 2) + t2, err := table.OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer t2.DecrRef() + done = lh0.tryAddLevel0Table(t2) + require.Equal(t, true, done) + + cd = compactDef{ + thisLevel: lh0, + nextLevel: lh1, + elog: trace.New("Badger", "Compact"), + } + lc.fillTablesL0(&cd) + lc.runCompactDef(0, cd) +} + +func TestManifestRewrite(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + deletionsThreshold := 10 + mf, m, err := helpOpenOrCreateManifestFile(dir, false, deletionsThreshold) + defer func() { + if mf != nil { + mf.close() + } + }() + require.NoError(t, err) + require.Equal(t, 0, m.Creations) + require.Equal(t, 0, m.Deletions) + + err = mf.addChanges([]*pb.ManifestChange{ + newCreateChange(0, 0, nil), + }) + require.NoError(t, err) + + for i := uint64(0); i < uint64(deletionsThreshold*3); i++ { + ch := []*pb.ManifestChange{ + newCreateChange(i+1, 0, nil), + newDeleteChange(i), + } + err := mf.addChanges(ch) + require.NoError(t, err) + } + err = mf.close() + require.NoError(t, err) + mf = nil + mf, m, err = helpOpenOrCreateManifestFile(dir, false, deletionsThreshold) + require.NoError(t, err) + require.Equal(t, map[uint64]TableManifest{ + uint64(deletionsThreshold * 3): {Level: 0, Checksum: []byte{}}, + }, m.Tables) +} diff --git a/vendor/github.com/dgraph-io/badger/merge.go b/vendor/github.com/dgraph-io/badger/merge.go new file mode 100644 index 00000000..af35e1f4 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/merge.go @@ -0,0 +1,173 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "sync" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// MergeOperator represents a Badger merge operator. +type MergeOperator struct { + sync.RWMutex + f MergeFunc + db *DB + key []byte + closer *y.Closer +} + +// MergeFunc accepts two byte slices, one representing an existing value, and +// another representing a new value that needs to be ‘merged’ into it. MergeFunc +// contains the logic to perform the ‘merge’ and return an updated value. +// MergeFunc could perform operations like integer addition, list appends etc. +// Note that the ordering of the operands is unspecified, so the merge func +// should either be agnostic to ordering or do additional handling if ordering +// is required. +type MergeFunc func(existing, val []byte) []byte + +// GetMergeOperator creates a new MergeOperator for a given key and returns a +// pointer to it. It also fires off a goroutine that performs a compaction using +// the merge function that runs periodically, as specified by dur. +func (db *DB) GetMergeOperator(key []byte, + f MergeFunc, dur time.Duration) *MergeOperator { + op := &MergeOperator{ + f: f, + db: db, + key: key, + closer: y.NewCloser(1), + } + + go op.runCompactions(dur) + return op +} + +var errNoMerge = errors.New("No need for merge") + +func (op *MergeOperator) iterateAndMerge(txn *Txn) (val []byte, err error) { + opt := DefaultIteratorOptions + opt.AllVersions = true + it := txn.NewIterator(opt) + defer it.Close() + + var numVersions int + for it.Rewind(); it.ValidForPrefix(op.key); it.Next() { + item := it.Item() + numVersions++ + if numVersions == 1 { + val, err = item.ValueCopy(val) + if err != nil { + return nil, err + } + } else { + if err := item.Value(func(newVal []byte) error { + val = op.f(val, newVal) + return nil + }); err != nil { + return nil, err + } + } + if item.DiscardEarlierVersions() { + break + } + } + if numVersions == 0 { + return nil, ErrKeyNotFound + } else if numVersions == 1 { + return val, errNoMerge + } + return val, nil +} + +func (op *MergeOperator) compact() error { + op.Lock() + defer op.Unlock() + err := op.db.Update(func(txn *Txn) error { + var ( + val []byte + err error + ) + val, err = op.iterateAndMerge(txn) + if err != nil { + return err + } + + // Write value back to db + return txn.SetWithDiscard(op.key, val, 0) + }) + + if err == ErrKeyNotFound || err == errNoMerge { + // pass. 
+ } else if err != nil { + return err + } + return nil +} + +func (op *MergeOperator) runCompactions(dur time.Duration) { + ticker := time.NewTicker(dur) + defer op.closer.Done() + var stop bool + for { + select { + case <-op.closer.HasBeenClosed(): + stop = true + case <-ticker.C: // wait for tick + } + if err := op.compact(); err != nil { + op.db.opt.Errorf("failure while running merge operation: %s", err) + } + if stop { + ticker.Stop() + break + } + } +} + +// Add records a value in Badger which will eventually be merged by a background +// routine into the values that were recorded by previous invocations to Add(). +func (op *MergeOperator) Add(val []byte) error { + return op.db.Update(func(txn *Txn) error { + return txn.Set(op.key, val) + }) +} + +// Get returns the latest value for the merge operator, which is derived by +// applying the merge function to all the values added so far. +// +// If Add has not been called even once, Get will return ErrKeyNotFound. +func (op *MergeOperator) Get() ([]byte, error) { + op.RLock() + defer op.RUnlock() + var existing []byte + err := op.db.View(func(txn *Txn) (err error) { + existing, err = op.iterateAndMerge(txn) + return err + }) + if err == errNoMerge { + return existing, nil + } + return existing, err +} + +// Stop waits for any pending merge to complete and then stops the background +// goroutine. +func (op *MergeOperator) Stop() { + op.closer.SignalAndWait() +} diff --git a/vendor/github.com/dgraph-io/badger/options.go b/vendor/github.com/dgraph-io/badger/options.go new file mode 100644 index 00000000..cdfddbe7 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/options.go @@ -0,0 +1,165 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" +) + +// NOTE: Keep the comments in the following to 75 chars width, so they +// format nicely in godoc. + +// Options are params for creating DB object. +// +// This package provides DefaultOptions which contains options that should +// work for most applications. Consider using that as a starting point before +// customizing it for your own needs. +type Options struct { + // 1. Mandatory flags + // ------------------- + // Directory to store the data in. If it doesn't exist, Badger will + // try to create it for you. + Dir string + // Directory to store the value log in. Can be the same as Dir. If it + // doesn't exist, Badger will try to create it for you. + ValueDir string + + // 2. Frequently modified flags + // ----------------------------- + // Sync all writes to disk. Setting this to false would achieve better + // performance, but may cause data to be lost. + SyncWrites bool + + // How should LSM tree be accessed. + TableLoadingMode options.FileLoadingMode + + // How should value log be accessed. + ValueLogLoadingMode options.FileLoadingMode + + // How many versions to keep per key. + NumVersionsToKeep int + + // 3. 
Flags that user might want to review + // ---------------------------------------- + // The following affect all levels of LSM tree. + MaxTableSize int64 // Each table (or file) is at most this size. + LevelSizeMultiplier int // Equals SizeOf(Li+1)/SizeOf(Li). + MaxLevels int // Maximum number of levels of compaction. + // If value size >= this threshold, only store value offsets in tree. + ValueThreshold int + // Maximum number of tables to keep in memory, before stalling. + NumMemtables int + // The following affect how we handle LSM tree L0. + // Maximum number of Level 0 tables before we start compacting. + NumLevelZeroTables int + + // If we hit this number of Level 0 tables, we will stall until L0 is + // compacted away. + NumLevelZeroTablesStall int + + // Maximum total size for L1. + LevelOneSize int64 + + // Size of single value log file. + ValueLogFileSize int64 + + // Max number of entries a value log file can hold (approximately). A value log file would be + // determined by the smaller of its file size and max entries. + ValueLogMaxEntries uint32 + + // Number of compaction workers to run concurrently. Setting this to zero would stop compactions + // to happen within LSM tree. If set to zero, writes could block forever. + NumCompactors int + + // When closing the DB, force compact Level 0. This ensures that both reads and writes are + // efficient when the DB is opened later. + CompactL0OnClose bool + + // Transaction start and commit timestamps are managed by end-user. + // This is only useful for databases built on top of Badger (like Dgraph). + // Not recommended for most users. + managedTxns bool + + // 4. Flags for testing purposes + // ------------------------------ + maxBatchCount int64 // max entries in batch + maxBatchSize int64 // max batch size in bytes + + // Open the DB as read-only. With this set, multiple processes can + // open the same Badger DB. Note: if the DB being opened had crashed + // before and has vlog data to be replayed, ReadOnly will cause Open + // to fail with an appropriate message. + ReadOnly bool + + // Truncate value log to delete corrupt data, if any. Would not truncate if ReadOnly is set. + Truncate bool + + // DB-specific logger which will override the global logger. + Logger Logger +} + +// DefaultOptions sets a list of recommended options for good performance. +// Feel free to modify these to suit your needs. +var DefaultOptions = Options{ + LevelOneSize: 256 << 20, + LevelSizeMultiplier: 10, + TableLoadingMode: options.LoadToRAM, + ValueLogLoadingMode: options.MemoryMap, + // table.MemoryMap to mmap() the tables. + // table.Nothing to not preload the tables. + MaxLevels: 7, + MaxTableSize: 64 << 20, + NumCompactors: 2, // Compactions can be expensive. Only run 2. + NumLevelZeroTables: 5, + NumLevelZeroTablesStall: 10, + NumMemtables: 5, + SyncWrites: true, + NumVersionsToKeep: 1, + CompactL0OnClose: true, + // Nothing to read/write value log using standard File I/O + // MemoryMap to mmap() the value log files + // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32. + // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems. + ValueLogFileSize: 1<<30 - 1, + + ValueLogMaxEntries: 1000000, + ValueThreshold: 32, + Truncate: false, + Logger: defaultLogger, +} + +// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold +// so values would be colocated with the LSM tree, with value log largely acting +// as a write-ahead log only. 
These options would reduce the disk usage of value +// log, and make Badger act more like a typical LSM tree. +var LSMOnlyOptions = Options{} + +func init() { + LSMOnlyOptions = DefaultOptions + + LSMOnlyOptions.ValueThreshold = 65500 // Max value length which fits in uint16. + // Let's not set any other options, because they can cause issues with the + // size of key-value a user can pass to Badger. For e.g., if we set + // ValueLogFileSize to 64MB, a user can't pass a value more than that. + // Setting it to ValueLogMaxEntries to 1000, can generate too many files. + // These options are better configured on a usage basis, than broadly here. + // The ValueThreshold is the most important setting a user needs to do to + // achieve a heavier usage of LSM tree. + // NOTE: If a user does not want to set 64KB as the ValueThreshold because + // of performance reasons, 1KB would be a good option too, allowing + // values smaller than 1KB to be colocated with the keys in the LSM tree. +} diff --git a/vendor/github.com/dgraph-io/badger/options/options.go b/vendor/github.com/dgraph-io/badger/options/options.go new file mode 100644 index 00000000..06c8b1b7 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/options/options.go @@ -0,0 +1,30 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package options + +// FileLoadingMode specifies how data in LSM table files and value log files should +// be loaded. 
+type FileLoadingMode int + +const ( + // FileIO indicates that files must be loaded using standard I/O + FileIO FileLoadingMode = iota + // LoadToRAM indicates that file must be loaded into RAM + LoadToRAM + // MemoryMap indicates that that the file must be memory-mapped + MemoryMap +) diff --git a/vendor/github.com/dgraph-io/badger/package.json b/vendor/github.com/dgraph-io/badger/package.json new file mode 100644 index 00000000..86883fbb --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/package.json @@ -0,0 +1,72 @@ +{ + "author": "dgraph-io", + "bugs": { + "url": "https://github.com/dgraph-io/badger" + }, + "gx": { + "dvcsimport": "github.com/dgraph-io/badger" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy", + "name": "errors", + "version": "0.0.1" + }, + { + "author": "magik6k", + "hash": "Qmbvv2urkn5Wtwws4yzjE85qRjB293EodchZofJsrTRuvN", + "name": "go-lz4", + "version": "1.0.0" + }, + { + "author": "kubuxu", + "hash": "QmWaLViWQF8jgyoLLqqcSrnp6dJpHESiJfzor1vrfDyTZf", + "name": "bbloom", + "version": "0.1.2" + }, + { + "author": "kubuxu", + "hash": "QmVGjyM9i2msKvLXwh9VosCTgP4mL91kC7hDmqnwTTx6Hu", + "name": "sys", + "version": "0.2.0" + }, + { + "author": "whyrusleeping", + "hash": "QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn", + "name": "go-net", + "version": "0.2.0" + }, + { + "author": "magik6k", + "hash": "QmRFFHk2jw9tgjxv12bCuuTnSbVXxEvYQkuNCLMEv9eUwP", + "name": "go-farm", + "version": "1.0.0" + }, + { + "author": "magik6k", + "hash": "QmQMxG9D52TirZd9eLA37nxiNspnMRkKbyPWrVAa1gvtSy", + "name": "go-humanize", + "version": "1.0.1" + }, + { + "author": "GoGo", + "hash": "QmddjPSGZb3ieihSseFeCfVRpZzcqczPNsD2DvarSwnjJB", + "name": "gogo-protobuf", + "version": "1.2.1" + }, + { + "author": "magik6k", + "hash": "QmXj63M2w2Pq7mnBpcrs7Va8prmfhvfMUNqVhJ9TgjiMbT", + "name": "cobra", + "version": "0.0.1" + } + ], + "gxVersion": "0.10.0", + "language": "go", + "license": "Apache 2.0", + "name": "badger", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "2.11.4" +} + diff --git a/vendor/github.com/dgraph-io/badger/pb/gen.sh b/vendor/github.com/dgraph-io/badger/pb/gen.sh new file mode 100644 index 00000000..bb446f26 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/pb/gen.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# You might need to go get -v github.com/gogo/protobuf/... + +protos=${GOPATH-$HOME/go}/src/github.com/dgraph-io/badger/pb +pushd $protos > /dev/null +protoc --gogofaster_out=. -I=. pb.proto diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go new file mode 100644 index 00000000..147c581c --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go @@ -0,0 +1,1236 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pb.proto + +package pb + +import ( + fmt "fmt" + proto "gx/ipfs/QmddjPSGZb3ieihSseFeCfVRpZzcqczPNsD2DvarSwnjJB/gogo-protobuf/proto" + io "io" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ManifestChange_Operation int32 + +const ( + ManifestChange_CREATE ManifestChange_Operation = 0 + ManifestChange_DELETE ManifestChange_Operation = 1 +) + +var ManifestChange_Operation_name = map[int32]string{ + 0: "CREATE", + 1: "DELETE", +} + +var ManifestChange_Operation_value = map[string]int32{ + "CREATE": 0, + "DELETE": 1, +} + +func (x ManifestChange_Operation) String() string { + return proto.EnumName(ManifestChange_Operation_name, int32(x)) +} + +func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f80abaa17e25ccc8, []int{3, 0} +} + +type KV struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"` + Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"` +} + +func (m *KV) Reset() { *m = KV{} } +func (m *KV) String() string { return proto.CompactTextString(m) } +func (*KV) ProtoMessage() {} +func (*KV) Descriptor() ([]byte, []int) { + return fileDescriptor_f80abaa17e25ccc8, []int{0} +} +func (m *KV) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KV.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *KV) XXX_Merge(src proto.Message) { + xxx_messageInfo_KV.Merge(m, src) +} +func (m *KV) XXX_Size() int { + return m.Size() +} +func (m *KV) XXX_DiscardUnknown() { + xxx_messageInfo_KV.DiscardUnknown(m) +} + +var xxx_messageInfo_KV proto.InternalMessageInfo + +func (m *KV) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KV) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *KV) GetUserMeta() []byte { + if m != nil { + return m.UserMeta + } + return nil +} + +func (m *KV) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *KV) GetExpiresAt() uint64 { + if m != nil { + return m.ExpiresAt + } + return 0 +} + +func (m *KV) GetMeta() []byte { + if m != nil { + return m.Meta + } + return nil +} + +type KVList struct { + Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` +} + +func (m *KVList) Reset() { *m = KVList{} } +func (m *KVList) String() string { return proto.CompactTextString(m) } +func (*KVList) ProtoMessage() {} +func (*KVList) Descriptor() ([]byte, []int) { + return fileDescriptor_f80abaa17e25ccc8, []int{1} +} +func (m *KVList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KVList.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *KVList) XXX_Merge(src proto.Message) { + xxx_messageInfo_KVList.Merge(m, src) +} +func (m *KVList) XXX_Size() int { + return m.Size() +} +func (m *KVList) XXX_DiscardUnknown() { + 
xxx_messageInfo_KVList.DiscardUnknown(m) +} + +var xxx_messageInfo_KVList proto.InternalMessageInfo + +func (m *KVList) GetKv() []*KV { + if m != nil { + return m.Kv + } + return nil +} + +type ManifestChangeSet struct { + // A set of changes that are applied atomically. + Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` +} + +func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} } +func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) } +func (*ManifestChangeSet) ProtoMessage() {} +func (*ManifestChangeSet) Descriptor() ([]byte, []int) { + return fileDescriptor_f80abaa17e25ccc8, []int{2} +} +func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ManifestChangeSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManifestChangeSet.Merge(m, src) +} +func (m *ManifestChangeSet) XXX_Size() int { + return m.Size() +} +func (m *ManifestChangeSet) XXX_DiscardUnknown() { + xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo + +func (m *ManifestChangeSet) GetChanges() []*ManifestChange { + if m != nil { + return m.Changes + } + return nil +} + +type ManifestChange struct { + Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"` + Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=pb.ManifestChange_Operation" json:"Op,omitempty"` + Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"` + Checksum []byte `protobuf:"bytes,4,opt,name=Checksum,proto3" json:"Checksum,omitempty"` +} + +func (m *ManifestChange) Reset() { *m = ManifestChange{} } +func (m *ManifestChange) String() string { return proto.CompactTextString(m) } +func (*ManifestChange) ProtoMessage() {} +func (*ManifestChange) Descriptor() ([]byte, []int) { + return fileDescriptor_f80abaa17e25ccc8, []int{3} +} +func (m *ManifestChange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ManifestChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManifestChange.Merge(m, src) +} +func (m *ManifestChange) XXX_Size() int { + return m.Size() +} +func (m *ManifestChange) XXX_DiscardUnknown() { + xxx_messageInfo_ManifestChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ManifestChange proto.InternalMessageInfo + +func (m *ManifestChange) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *ManifestChange) GetOp() ManifestChange_Operation { + if m != nil { + return m.Op + } + return ManifestChange_CREATE +} + +func (m *ManifestChange) GetLevel() uint32 { + if m != nil { + return m.Level + } + return 0 +} + +func (m *ManifestChange) GetChecksum() []byte { + if m != nil { + return m.Checksum + } + return nil +} + +func init() { + proto.RegisterEnum("pb.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value) + 
proto.RegisterType((*KV)(nil), "pb.KV") + proto.RegisterType((*KVList)(nil), "pb.KVList") + proto.RegisterType((*ManifestChangeSet)(nil), "pb.ManifestChangeSet") + proto.RegisterType((*ManifestChange)(nil), "pb.ManifestChange") +} + +func init() { proto.RegisterFile("pb.proto", fileDescriptor_f80abaa17e25ccc8) } + +var fileDescriptor_f80abaa17e25ccc8 = []byte{ + // 349 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x4b, 0x4e, 0xc2, 0x40, + 0x18, 0xc7, 0x3b, 0x43, 0x29, 0xf0, 0x89, 0x04, 0x27, 0xc6, 0x4c, 0x7c, 0x34, 0x4d, 0xdd, 0xb0, + 0x20, 0x5d, 0xe0, 0x09, 0x10, 0xbb, 0x20, 0x40, 0x48, 0x46, 0xc2, 0x96, 0xb4, 0x30, 0x4a, 0x53, + 0x68, 0x27, 0xed, 0xd0, 0xe8, 0x2d, 0x5c, 0x79, 0x03, 0xef, 0xe2, 0x92, 0xa5, 0x4b, 0x03, 0x17, + 0x31, 0x1d, 0x1e, 0x09, 0x71, 0xf7, 0x7f, 0xcc, 0xf7, 0x5f, 0xfc, 0x06, 0xca, 0xc2, 0x77, 0x44, + 0x12, 0xcb, 0x98, 0x60, 0xe1, 0xdb, 0x9f, 0x08, 0x70, 0x6f, 0x4c, 0xea, 0x50, 0x08, 0xf9, 0x3b, + 0x45, 0x16, 0x6a, 0x54, 0x59, 0x2e, 0xc9, 0x25, 0x14, 0x33, 0x6f, 0xb1, 0xe2, 0x14, 0xab, 0x6c, + 0x67, 0xc8, 0x0d, 0x54, 0x56, 0x29, 0x4f, 0x26, 0x4b, 0x2e, 0x3d, 0x5a, 0x50, 0x4d, 0x39, 0x0f, + 0x06, 0x5c, 0x7a, 0x84, 0x42, 0x29, 0xe3, 0x49, 0x1a, 0xc4, 0x11, 0xd5, 0x2d, 0xd4, 0xd0, 0xd9, + 0xc1, 0x92, 0x3b, 0x00, 0xfe, 0x26, 0x82, 0x84, 0xa7, 0x13, 0x4f, 0xd2, 0xa2, 0x2a, 0x2b, 0xfb, + 0xa4, 0x2d, 0x09, 0x01, 0x5d, 0x0d, 0x1a, 0x6a, 0x50, 0x69, 0xdb, 0x02, 0xa3, 0x37, 0xee, 0x07, + 0xa9, 0x24, 0x57, 0x80, 0xc3, 0x8c, 0x22, 0xab, 0xd0, 0x38, 0x6b, 0x19, 0x8e, 0xf0, 0x9d, 0xde, + 0x98, 0xe1, 0x30, 0xb3, 0xdb, 0x70, 0x31, 0xf0, 0xa2, 0xe0, 0x85, 0xa7, 0xb2, 0x33, 0xf7, 0xa2, + 0x57, 0xfe, 0xcc, 0x25, 0x69, 0x42, 0x69, 0xaa, 0x4c, 0xba, 0xbf, 0x20, 0xf9, 0xc5, 0xe9, 0x3b, + 0x76, 0x78, 0x62, 0x7f, 0x21, 0xa8, 0x9d, 0x76, 0xa4, 0x06, 0xb8, 0x3b, 0x53, 0x20, 0x74, 0x86, + 0xbb, 0x33, 0xd2, 0x04, 0x3c, 0x14, 0x0a, 0x42, 0xad, 0x75, 0xfb, 0x7f, 0xcb, 0x19, 0x0a, 0x9e, + 0x78, 0x32, 0x88, 0x23, 0x86, 0x87, 0x22, 0xa7, 0xd6, 0xe7, 0x19, 0x5f, 0x28, 0x36, 0xe7, 0x6c, + 0x67, 0xc8, 0x35, 0x94, 0x3b, 0x73, 0x3e, 0x0d, 0xd3, 0xd5, 0x52, 0x91, 0xa9, 0xb2, 0xa3, 0xb7, + 0xef, 0xa1, 0x72, 0x9c, 0x20, 0x00, 0x46, 0x87, 0xb9, 0xed, 0x91, 0x5b, 0xd7, 0x72, 0xfd, 0xe4, + 0xf6, 0xdd, 0x91, 0x5b, 0x47, 0x8f, 0xf4, 0x7b, 0x63, 0xa2, 0xf5, 0xc6, 0x44, 0xbf, 0x1b, 0x13, + 0x7d, 0x6c, 0x4d, 0x6d, 0xbd, 0x35, 0xb5, 0x9f, 0xad, 0xa9, 0xf9, 0x86, 0xfa, 0xca, 0x87, 0xbf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xf9, 0xca, 0x14, 0xd6, 0x01, 0x00, 0x00, +} + +func (m *KV) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KV) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if len(m.UserMeta) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintPb(dAtA, i, uint64(len(m.UserMeta))) + i += copy(dAtA[i:], m.UserMeta) + } + if m.Version != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintPb(dAtA, i, uint64(m.Version)) + } + if m.ExpiresAt != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintPb(dAtA, i, uint64(m.ExpiresAt)) + } + if len(m.Meta) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintPb(dAtA, i, uint64(len(m.Meta))) + i += 
copy(dAtA[i:], m.Meta) + } + return i, nil +} + +func (m *KVList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KVList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kv) > 0 { + for _, msg := range m.Kv { + dAtA[i] = 0xa + i++ + i = encodeVarintPb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0xa + i++ + i = encodeVarintPb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ManifestChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintPb(dAtA, i, uint64(m.Id)) + } + if m.Op != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintPb(dAtA, i, uint64(m.Op)) + } + if m.Level != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintPb(dAtA, i, uint64(m.Level)) + } + if len(m.Checksum) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintPb(dAtA, i, uint64(len(m.Checksum))) + i += copy(dAtA[i:], m.Checksum) + } + return i, nil +} + +func encodeVarintPb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KV) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovPb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovPb(uint64(l)) + } + l = len(m.UserMeta) + if l > 0 { + n += 1 + l + sovPb(uint64(l)) + } + if m.Version != 0 { + n += 1 + sovPb(uint64(m.Version)) + } + if m.ExpiresAt != 0 { + n += 1 + sovPb(uint64(m.ExpiresAt)) + } + l = len(m.Meta) + if l > 0 { + n += 1 + l + sovPb(uint64(l)) + } + return n +} + +func (m *KVList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Kv) > 0 { + for _, e := range m.Kv { + l = e.Size() + n += 1 + l + sovPb(uint64(l)) + } + } + return n +} + +func (m *ManifestChangeSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovPb(uint64(l)) + } + } + return n +} + +func (m *ManifestChange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPb(uint64(m.Id)) + } + if m.Op != 0 { + n += 1 + sovPb(uint64(m.Op)) + } + if m.Level != 0 { + n += 1 + sovPb(uint64(m.Level)) + } + l = len(m.Checksum) + if l > 0 { + n += 1 + l + sovPb(uint64(l)) + } + return n +} + +func sovPb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPb(x uint64) (n int) { + return sovPb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*KV) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KV: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...) 
+ if m.UserMeta == nil { + m.UserMeta = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) + } + m.ExpiresAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...) + if m.Meta == nil { + m.Meta = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KVList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KVList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kv = append(m.Kv, &KV{}) + if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, &ManifestChange{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManifestChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + m.Op = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Op |= ManifestChange_Operation(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= uint32(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checksum = append(m.Checksum[:0], dAtA[iNdEx:postIndex]...) + if m.Checksum == nil { + m.Checksum = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPb + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthPb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthPb + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPb = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.proto b/vendor/github.com/dgraph-io/badger/pb/pb.proto new file mode 100644 index 00000000..b790cf69 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/pb/pb.proto @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Use protos/gen.sh to generate .pb.go files. +syntax = "proto3"; + +package pb; + +message KV { + bytes key = 1; + bytes value = 2; + bytes user_meta = 3; + uint64 version = 4; + uint64 expires_at = 5; + bytes meta = 6; +} + +message KVList { + repeated KV kv = 1; +} + +message ManifestChangeSet { + // A set of changes that are applied atomically. + repeated ManifestChange changes = 1; +} + +message ManifestChange { + uint64 Id = 1; + enum Operation { + CREATE = 0; + DELETE = 1; + } + Operation Op = 2; + uint32 Level = 3; // Only used for CREATE + bytes Checksum = 4; // Only used for CREATE +} diff --git a/vendor/github.com/dgraph-io/badger/skl/README.md b/vendor/github.com/dgraph-io/badger/skl/README.md new file mode 100644 index 00000000..92fa68bb --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/skl/README.md @@ -0,0 +1,113 @@ +This is much better than `skiplist` and `slist`. + +``` +BenchmarkReadWrite/frac_0-8 3000000 537 ns/op +BenchmarkReadWrite/frac_1-8 3000000 503 ns/op +BenchmarkReadWrite/frac_2-8 3000000 492 ns/op +BenchmarkReadWrite/frac_3-8 3000000 475 ns/op +BenchmarkReadWrite/frac_4-8 3000000 440 ns/op +BenchmarkReadWrite/frac_5-8 5000000 442 ns/op +BenchmarkReadWrite/frac_6-8 5000000 380 ns/op +BenchmarkReadWrite/frac_7-8 5000000 338 ns/op +BenchmarkReadWrite/frac_8-8 5000000 294 ns/op +BenchmarkReadWrite/frac_9-8 10000000 268 ns/op +BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op +``` + +And even better than a simple map with read-write lock: + +``` +BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op +BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op +BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op +BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op +BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op +BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op +BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op +BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op +BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op +BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op +BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op +``` + +# Node Pooling + +Command used + +``` +rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10 +``` + +For pprof results, we run without using /usr/bin/time. There are four runs below. + +Results seem to vary quite a bit between runs. 
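
A heap profile like the ones shown below can be captured from a Go program with `runtime/pprof` and inspected with `go tool pprof`. A minimal sketch of that capture step follows (illustrative only — the populate tool may wire this up differently, e.g. behind a flag):

```
package populate // illustrative helper, not the actual populate tool

import (
	"os"
	"runtime/pprof"
)

// writeHeapProfile dumps the current heap allocation profile to path so it
// can be inspected later with `go tool pprof <binary> <path>` and `top`.
func writeHeapProfile(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return pprof.WriteHeapProfile(f)
}
```

In the `top` listings below, `flat` is memory attributed to the function itself and `cum` includes its callees.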
+ +## Before node pooling + +``` +1311.53MB of 1338.69MB total (97.97%) +Dropped 30 nodes (cum <= 6.69MB) +Showing top 10 nodes out of 37 (cum >= 12.50MB) + flat flat% sum% cum cum% + 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put + 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte + 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put + 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E + 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice + 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue + 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV + 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next + 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read + 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode + + 128.31 real 329.37 user 17.11 sys +3355660288 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 2203080 page reclaims + 764 page faults + 0 swaps + 275 block input operations + 76 block output operations + 0 messages sent + 0 messages received + 0 signals received + 49173 voluntary context switches + 599922 involuntary context switches +``` + +## After node pooling + +``` +1963.13MB of 2026.09MB total (96.89%) +Dropped 29 nodes (cum <= 10.13MB) +Showing top 10 nodes out of 41 (cum >= 185.62MB) + flat flat% sum% cum cum% + 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1 + 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E + 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte + 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put + 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice + 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode + 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue + 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV + 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read + 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next + + 135.58 real 374.29 user 17.65 sys +3740614656 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 2276566 page reclaims + 770 page faults + 0 swaps + 128 block input operations + 90 block output operations + 0 messages sent + 0 messages received + 0 signals received + 46434 voluntary context switches + 597049 involuntary context switches +``` \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/skl/arena.go b/vendor/github.com/dgraph-io/badger/skl/arena.go new file mode 100644 index 00000000..2decb75c --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/skl/arena.go @@ -0,0 +1,136 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package skl + +import ( + "sync/atomic" + "unsafe" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +const ( + offsetSize = int(unsafe.Sizeof(uint32(0))) + + // Always align nodes on 64-bit boundaries, even on 32-bit architectures, + // so that the node.value field is 64-bit aligned. This is necessary because + // node.getValueOffset uses atomic.LoadUint64, which expects its input + // pointer to be 64-bit aligned. + nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1 +) + +// Arena should be lock-free. +type Arena struct { + n uint32 + buf []byte +} + +// newArena returns a new arena. +func newArena(n int64) *Arena { + // Don't store data at position 0 in order to reserve offset=0 as a kind + // of nil pointer. + out := &Arena{ + n: 1, + buf: make([]byte, n), + } + return out +} + +func (s *Arena) size() int64 { + return int64(atomic.LoadUint32(&s.n)) +} + +func (s *Arena) reset() { + atomic.StoreUint32(&s.n, 0) +} + +// putNode allocates a node in the arena. The node is aligned on a pointer-sized +// boundary. The arena offset of the node is returned. +func (s *Arena) putNode(height int) uint32 { + // Compute the amount of the tower that will never be used, since the height + // is less than maxHeight. + unusedSize := (maxHeight - height) * offsetSize + + // Pad the allocation with enough bytes to ensure pointer alignment. + l := uint32(MaxNodeSize - unusedSize + nodeAlign) + n := atomic.AddUint32(&s.n, l) + y.AssertTruef(int(n) <= len(s.buf), + "Arena too small, toWrite:%d newTotal:%d limit:%d", + l, n, len(s.buf)) + + // Return the aligned offset. + m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign) + return m +} + +// Put will *copy* val into arena. To make better use of this, reuse your input +// val buffer. Returns an offset into buf. User is responsible for remembering +// size of val. We could also store this size inside arena but the encoding and +// decoding will incur some overhead. +func (s *Arena) putVal(v y.ValueStruct) uint32 { + l := uint32(v.EncodedSize()) + n := atomic.AddUint32(&s.n, l) + y.AssertTruef(int(n) <= len(s.buf), + "Arena too small, toWrite:%d newTotal:%d limit:%d", + l, n, len(s.buf)) + m := n - l + v.Encode(s.buf[m:]) + return m +} + +func (s *Arena) putKey(key []byte) uint32 { + l := uint32(len(key)) + n := atomic.AddUint32(&s.n, l) + y.AssertTruef(int(n) <= len(s.buf), + "Arena too small, toWrite:%d newTotal:%d limit:%d", + l, n, len(s.buf)) + m := n - l + y.AssertTrue(len(key) == copy(s.buf[m:n], key)) + return m +} + +// getNode returns a pointer to the node located at offset. If the offset is +// zero, then the nil node pointer is returned. +func (s *Arena) getNode(offset uint32) *node { + if offset == 0 { + return nil + } + + return (*node)(unsafe.Pointer(&s.buf[offset])) +} + +// getKey returns byte slice at offset. +func (s *Arena) getKey(offset uint32, size uint16) []byte { + return s.buf[offset : offset+uint32(size)] +} + +// getVal returns byte slice at offset. The given size should be just the value +// size and should NOT include the meta bytes. +func (s *Arena) getVal(offset uint32, size uint16) (ret y.ValueStruct) { + ret.Decode(s.buf[offset : offset+uint32(size)]) + return +} + +// getNodeOffset returns the offset of node in the arena. If the node pointer is +// nil, then the zero offset is returned. 
+func (s *Arena) getNodeOffset(nd *node) uint32 { + if nd == nil { + return 0 + } + + return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0]))) +} diff --git a/vendor/github.com/dgraph-io/badger/skl/skl.go b/vendor/github.com/dgraph-io/badger/skl/skl.go new file mode 100644 index 00000000..81a0506e --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/skl/skl.go @@ -0,0 +1,516 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +Adapted from RocksDB inline skiplist. + +Key differences: +- No optimization for sequential inserts (no "prev"). +- No custom comparator. +- Support overwrites. This requires care when we see the same key when inserting. + For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so + there is no need for values. We don't intend to support versioning. In-place updates of values + would be more efficient. +- We discard all non-concurrent code. +- We do not support Splices. This simplifies the code a lot. +- No AllocateNode or other pointer arithmetic. +- We combine the findLessThan, findGreaterOrEqual, etc into one function. +*/ + +package skl + +import ( + "math" + "math/rand" + "sync/atomic" + "unsafe" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +const ( + maxHeight = 20 + heightIncrease = math.MaxUint32 / 3 +) + +// MaxNodeSize is the memory footprint of a node of maximum height. +const MaxNodeSize = int(unsafe.Sizeof(node{})) + +type node struct { + // Multiple parts of the value are encoded as a single uint64 so that it + // can be atomically loaded and stored: + // value offset: uint32 (bits 0-31) + // value size : uint16 (bits 32-47) + value uint64 + + // A byte slice is 24 bytes. We are trying to save space here. + keyOffset uint32 // Immutable. No need to lock to access key. + keySize uint16 // Immutable. No need to lock to access key. + + // Height of the tower. + height uint16 + + // Most nodes do not need to use the full height of the tower, since the + // probability of each successive level decreases exponentially. Because + // these elements are never accessed, they do not need to be allocated. + // Therefore, when a node is allocated in the arena, its memory footprint + // is deliberately truncated to not include unneeded tower elements. + // + // All accesses to elements should use CAS operations, with no need to lock. + tower [maxHeight]uint32 +} + +// Skiplist maps keys to values (in memory) +type Skiplist struct { + height int32 // Current height. 1 <= height <= kMaxHeight. CAS. + head *node + ref int32 + arena *Arena +} + +// IncrRef increases the refcount +func (s *Skiplist) IncrRef() { + atomic.AddInt32(&s.ref, 1) +} + +// DecrRef decrements the refcount, deallocating the Skiplist when done using it +func (s *Skiplist) DecrRef() { + newRef := atomic.AddInt32(&s.ref, -1) + if newRef > 0 { + return + } + + s.arena.reset() + // Indicate we are closed. Good for testing. 
Also, lets GC reclaim memory. Race condition + // here would suggest we are accessing skiplist when we are supposed to have no reference! + s.arena = nil +} + +func (s *Skiplist) valid() bool { return s.arena != nil } + +func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node { + // The base level is already allocated in the node struct. + offset := arena.putNode(height) + node := arena.getNode(offset) + node.keyOffset = arena.putKey(key) + node.keySize = uint16(len(key)) + node.height = uint16(height) + node.value = encodeValue(arena.putVal(v), v.EncodedSize()) + return node +} + +func encodeValue(valOffset uint32, valSize uint16) uint64 { + return uint64(valSize)<<32 | uint64(valOffset) +} + +func decodeValue(value uint64) (valOffset uint32, valSize uint16) { + valOffset = uint32(value) + valSize = uint16(value >> 32) + return +} + +// NewSkiplist makes a new empty skiplist, with a given arena size +func NewSkiplist(arenaSize int64) *Skiplist { + arena := newArena(arenaSize) + head := newNode(arena, nil, y.ValueStruct{}, maxHeight) + return &Skiplist{ + height: 1, + head: head, + arena: arena, + ref: 1, + } +} + +func (s *node) getValueOffset() (uint32, uint16) { + value := atomic.LoadUint64(&s.value) + return decodeValue(value) +} + +func (s *node) key(arena *Arena) []byte { + return arena.getKey(s.keyOffset, s.keySize) +} + +func (s *node) setValue(arena *Arena, v y.ValueStruct) { + valOffset := arena.putVal(v) + value := encodeValue(valOffset, v.EncodedSize()) + atomic.StoreUint64(&s.value, value) +} + +func (s *node) getNextOffset(h int) uint32 { + return atomic.LoadUint32(&s.tower[h]) +} + +func (s *node) casNextOffset(h int, old, val uint32) bool { + return atomic.CompareAndSwapUint32(&s.tower[h], old, val) +} + +// Returns true if key is strictly > n.key. +// If n is nil, this is an "end" marker and we return false. +//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool { +// y.AssertTrue(n != s.head) +// return n != nil && y.CompareKeys(key, n.key) > 0 +//} + +func randomHeight() int { + h := 1 + for h < maxHeight && rand.Uint32() <= heightIncrease { + h++ + } + return h +} + +func (s *Skiplist) getNext(nd *node, height int) *node { + return s.arena.getNode(nd.getNextOffset(height)) +} + +// findNear finds the node near to key. +// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or +// node.key <= key (if allowEqual=true). +// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or +// node.key >= key (if allowEqual=true). +// Returns the node found. The bool returned is true if the node has key equal to given key. +func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) { + x := s.head + level := int(s.getHeight() - 1) + for { + // Assume x.key < key. + next := s.getNext(x, level) + if next == nil { + // x.key < key < END OF LIST + if level > 0 { + // Can descend further to iterate closer to the end. + level-- + continue + } + // Level=0. Cannot descend further. Let's return something that makes sense. + if !less { + return nil, false + } + // Try to return x. Make sure it is not a head node. + if x == s.head { + return nil, false + } + return x, false + } + + nextKey := next.key(s.arena) + cmp := y.CompareKeys(key, nextKey) + if cmp > 0 { + // x.key < next.key < key. We can continue to move right. + x = next + continue + } + if cmp == 0 { + // x.key < key == next.key. 
+ if allowEqual { + return next, true + } + if !less { + // We want >, so go to base level to grab the next bigger note. + return s.getNext(next, 0), false + } + // We want <. If not base level, we should go closer in the next level. + if level > 0 { + level-- + continue + } + // On base level. Return x. + if x == s.head { + return nil, false + } + return x, false + } + // cmp < 0. In other words, x.key < key < next. + if level > 0 { + level-- + continue + } + // At base level. Need to return something. + if !less { + return next, false + } + // Try to return x. Make sure it is not a head node. + if x == s.head { + return nil, false + } + return x, false + } +} + +// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key. +// The input "before" tells us where to start looking. +// If we found a node with the same key, then we return outBefore = outAfter. +// Otherwise, outBefore.key < key < outAfter.key. +func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) { + for { + // Assume before.key < key. + next := s.getNext(before, level) + if next == nil { + return before, next + } + nextKey := next.key(s.arena) + cmp := y.CompareKeys(key, nextKey) + if cmp == 0 { + // Equality case. + return next, next + } + if cmp < 0 { + // before.key < key < next.key. We are done for this level. + return before, next + } + before = next // Keep moving right on this level. + } +} + +func (s *Skiplist) getHeight() int32 { + return atomic.LoadInt32(&s.height) +} + +// Put inserts the key-value pair. +func (s *Skiplist) Put(key []byte, v y.ValueStruct) { + // Since we allow overwrite, we may not need to create a new node. We might not even need to + // increase the height. Let's defer these actions. + + listHeight := s.getHeight() + var prev [maxHeight + 1]*node + var next [maxHeight + 1]*node + prev[listHeight] = s.head + next[listHeight] = nil + for i := int(listHeight) - 1; i >= 0; i-- { + // Use higher level to speed up for current level. + prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i) + if prev[i] == next[i] { + prev[i].setValue(s.arena, v) + return + } + } + + // We do need to create a new node. + height := randomHeight() + x := newNode(s.arena, key, v, height) + + // Try to increase s.height via CAS. + listHeight = s.getHeight() + for height > int(listHeight) { + if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) { + // Successfully increased skiplist.height. + break + } + listHeight = s.getHeight() + } + + // We always insert from the base level and up. After you add a node in base level, we cannot + // create a node in the level above because it would have discovered the node in the base level. + for i := 0; i < height; i++ { + for { + if prev[i] == nil { + y.AssertTrue(i > 1) // This cannot happen in base level. + // We haven't computed prev, next for this level because height exceeds old listHeight. + // For these levels, we expect the lists to be sparse, so we can just search from head. + prev[i], next[i] = s.findSpliceForLevel(key, s.head, i) + // Someone adds the exact same key before we are able to do so. This can only happen on + // the base level. But we know we are not on the base level. + y.AssertTrue(prev[i] != next[i]) + } + nextOffset := s.arena.getNodeOffset(next[i]) + x.tower[i] = nextOffset + if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) { + // Managed to insert x between prev[i] and next[i]. Go to the next level. + break + } + // CAS failed. 
We need to recompute prev and next. + // It is unlikely to be helpful to try to use a different level as we redo the search, + // because it is unlikely that lots of nodes are inserted between prev[i] and next[i]. + prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i) + if prev[i] == next[i] { + y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i) + prev[i].setValue(s.arena, v) + return + } + } + } +} + +// Empty returns if the Skiplist is empty. +func (s *Skiplist) Empty() bool { + return s.findLast() == nil +} + +// findLast returns the last element. If head (empty list), we return nil. All the find functions +// will NEVER return the head nodes. +func (s *Skiplist) findLast() *node { + n := s.head + level := int(s.getHeight()) - 1 + for { + next := s.getNext(n, level) + if next != nil { + n = next + continue + } + if level == 0 { + if n == s.head { + return nil + } + return n + } + level-- + } +} + +// Get gets the value associated with the key. It returns a valid value if it finds equal or earlier +// version of the same key. +func (s *Skiplist) Get(key []byte) y.ValueStruct { + n, _ := s.findNear(key, false, true) // findGreaterOrEqual. + if n == nil { + return y.ValueStruct{} + } + + nextKey := s.arena.getKey(n.keyOffset, n.keySize) + if !y.SameKey(key, nextKey) { + return y.ValueStruct{} + } + + valOffset, valSize := n.getValueOffset() + vs := s.arena.getVal(valOffset, valSize) + vs.Version = y.ParseTs(nextKey) + return vs +} + +// NewIterator returns a skiplist iterator. You have to Close() the iterator. +func (s *Skiplist) NewIterator() *Iterator { + s.IncrRef() + return &Iterator{list: s} +} + +// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal +// arena. +func (s *Skiplist) MemSize() int64 { return s.arena.size() } + +// Iterator is an iterator over skiplist object. For new objects, you just +// need to initialize Iterator.list. +type Iterator struct { + list *Skiplist + n *node +} + +// Close frees the resources held by the iterator +func (s *Iterator) Close() error { + s.list.DecrRef() + return nil +} + +// Valid returns true iff the iterator is positioned at a valid node. +func (s *Iterator) Valid() bool { return s.n != nil } + +// Key returns the key at the current position. +func (s *Iterator) Key() []byte { + return s.list.arena.getKey(s.n.keyOffset, s.n.keySize) +} + +// Value returns value. +func (s *Iterator) Value() y.ValueStruct { + valOffset, valSize := s.n.getValueOffset() + return s.list.arena.getVal(valOffset, valSize) +} + +// Next advances to the next position. +func (s *Iterator) Next() { + y.AssertTrue(s.Valid()) + s.n = s.list.getNext(s.n, 0) +} + +// Prev advances to the previous position. +func (s *Iterator) Prev() { + y.AssertTrue(s.Valid()) + s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed. +} + +// Seek advances to the first entry with a key >= target. +func (s *Iterator) Seek(target []byte) { + s.n, _ = s.list.findNear(target, false, true) // find >=. +} + +// SeekForPrev finds an entry with key <= target. +func (s *Iterator) SeekForPrev(target []byte) { + s.n, _ = s.list.findNear(target, true, true) // find <=. +} + +// SeekToFirst seeks position at the first entry in list. +// Final state of iterator is Valid() iff list is not empty. +func (s *Iterator) SeekToFirst() { + s.n = s.list.getNext(s.list.head, 0) +} + +// SeekToLast seeks position at the last entry in list. +// Final state of iterator is Valid() iff list is not empty. 
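+//
+// An illustrative sketch of a full reverse scan (the same pattern
+// TestIteratorPrev in skl_test.go uses):
+//
+//	it := list.NewIterator()
+//	defer it.Close()
+//	for it.SeekToLast(); it.Valid(); it.Prev() {
+//		_ = it.Key()
+//		_ = it.Value()
+//	}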
+func (s *Iterator) SeekToLast() { + s.n = s.list.findLast() +} + +// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around +// Iterator. We like to keep Iterator as before, because it is more powerful and +// we might support bidirectional iterators in the future. +type UniIterator struct { + iter *Iterator + reversed bool +} + +// NewUniIterator returns a UniIterator. +func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator { + return &UniIterator{ + iter: s.NewIterator(), + reversed: reversed, + } +} + +// Next implements y.Interface +func (s *UniIterator) Next() { + if !s.reversed { + s.iter.Next() + } else { + s.iter.Prev() + } +} + +// Rewind implements y.Interface +func (s *UniIterator) Rewind() { + if !s.reversed { + s.iter.SeekToFirst() + } else { + s.iter.SeekToLast() + } +} + +// Seek implements y.Interface +func (s *UniIterator) Seek(key []byte) { + if !s.reversed { + s.iter.Seek(key) + } else { + s.iter.SeekForPrev(key) + } +} + +// Key implements y.Interface +func (s *UniIterator) Key() []byte { return s.iter.Key() } + +// Value implements y.Interface +func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() } + +// Valid implements y.Interface +func (s *UniIterator) Valid() bool { return s.iter.Valid() } + +// Close implements y.Interface (and frees up the iter's resources) +func (s *UniIterator) Close() error { return s.iter.Close() } diff --git a/vendor/github.com/dgraph-io/badger/skl/skl_test.go b/vendor/github.com/dgraph-io/badger/skl/skl_test.go new file mode 100644 index 00000000..cc695fbf --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/skl/skl_test.go @@ -0,0 +1,475 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package skl + +import ( + "encoding/binary" + "fmt" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +const arenaSize = 1 << 20 + +func newValue(v int) []byte { + return []byte(fmt.Sprintf("%05d", v)) +} + +// length iterates over skiplist to give exact size. +func length(s *Skiplist) int { + x := s.getNext(s.head, 0) + count := 0 + for x != nil { + count++ + x = s.getNext(x, 0) + } + return count +} + +func TestEmpty(t *testing.T) { + key := []byte("aaa") + l := NewSkiplist(arenaSize) + + v := l.Get(key) + require.True(t, v.Value == nil) // Cannot use require.Nil for unsafe.Pointer nil. + + for _, less := range []bool{true, false} { + for _, allowEqual := range []bool{true, false} { + n, found := l.findNear(key, less, allowEqual) + require.Nil(t, n) + require.False(t, found) + } + } + + it := l.NewIterator() + require.False(t, it.Valid()) + + it.SeekToFirst() + require.False(t, it.Valid()) + + it.SeekToLast() + require.False(t, it.Valid()) + + it.Seek(key) + require.False(t, it.Valid()) + + l.DecrRef() + require.True(t, l.valid()) // Check the reference counting. 
+ + it.Close() + require.False(t, l.valid()) // Check the reference counting. +} + +// TestBasic tests single-threaded inserts and updates and gets. +func TestBasic(t *testing.T) { + l := NewSkiplist(arenaSize) + val1 := newValue(42) + val2 := newValue(52) + val3 := newValue(62) + val4 := newValue(72) + + // Try inserting values. + // Somehow require.Nil doesn't work when checking for unsafe.Pointer(nil). + l.Put(y.KeyWithTs([]byte("key1"), 0), y.ValueStruct{Value: val1, Meta: 55, UserMeta: 0}) + l.Put(y.KeyWithTs([]byte("key2"), 2), y.ValueStruct{Value: val2, Meta: 56, UserMeta: 0}) + l.Put(y.KeyWithTs([]byte("key3"), 0), y.ValueStruct{Value: val3, Meta: 57, UserMeta: 0}) + + v := l.Get(y.KeyWithTs([]byte("key"), 0)) + require.True(t, v.Value == nil) + + v = l.Get(y.KeyWithTs([]byte("key1"), 0)) + require.True(t, v.Value != nil) + require.EqualValues(t, "00042", string(v.Value)) + require.EqualValues(t, 55, v.Meta) + + v = l.Get(y.KeyWithTs([]byte("key2"), 0)) + require.True(t, v.Value == nil) + + v = l.Get(y.KeyWithTs([]byte("key3"), 0)) + require.True(t, v.Value != nil) + require.EqualValues(t, "00062", string(v.Value)) + require.EqualValues(t, 57, v.Meta) + + l.Put(y.KeyWithTs([]byte("key3"), 1), y.ValueStruct{Value: val4, Meta: 12, UserMeta: 0}) + v = l.Get(y.KeyWithTs([]byte("key3"), 1)) + require.True(t, v.Value != nil) + require.EqualValues(t, "00072", string(v.Value)) + require.EqualValues(t, 12, v.Meta) +} + +// TestConcurrentBasic tests concurrent writes followed by concurrent reads. +func TestConcurrentBasic(t *testing.T) { + const n = 1000 + l := NewSkiplist(arenaSize) + var wg sync.WaitGroup + key := func(i int) []byte { + return y.KeyWithTs([]byte(fmt.Sprintf("%05d", i)), 0) + } + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + l.Put(key(i), + y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) + }(i) + } + wg.Wait() + // Check values. Concurrent reads. + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + v := l.Get(key(i)) + require.True(t, v.Value != nil) + require.EqualValues(t, newValue(i), v.Value) + }(i) + } + wg.Wait() + require.EqualValues(t, n, length(l)) +} + +// TestOneKey will read while writing to one single key. +func TestOneKey(t *testing.T) { + const n = 100 + key := y.KeyWithTs([]byte("thekey"), 0) + l := NewSkiplist(arenaSize) + defer l.DecrRef() + + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + l.Put(key, y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) + }(i) + } + // We expect that at least some write made it such that some read returns a value. 
+ var sawValue int32 + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + p := l.Get(key) + if p.Value == nil { + return + } + atomic.AddInt32(&sawValue, 1) + v, err := strconv.Atoi(string(p.Value)) + require.NoError(t, err) + require.True(t, 0 <= v && v < n, fmt.Sprintf("invalid value %d", v)) + }() + } + wg.Wait() + require.True(t, sawValue > 0) + require.EqualValues(t, 1, length(l)) +} + +func TestFindNear(t *testing.T) { + l := NewSkiplist(arenaSize) + defer l.DecrRef() + for i := 0; i < 1000; i++ { + key := fmt.Sprintf("%05d", i*10+5) + l.Put(y.KeyWithTs([]byte(key), 0), y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) + } + + n, eq := l.findNear(y.KeyWithTs([]byte("00001"), 0), false, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("00005"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("00001"), 0), false, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("00005"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("00001"), 0), true, false) + require.Nil(t, n) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("00001"), 0), true, true) + require.Nil(t, n) + require.False(t, eq) + + n, eq = l.findNear(y.KeyWithTs([]byte("00005"), 0), false, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("00015"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("00005"), 0), false, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("00005"), 0), string(n.key(l.arena))) + require.True(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("00005"), 0), true, false) + require.Nil(t, n) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("00005"), 0), true, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("00005"), 0), string(n.key(l.arena))) + require.True(t, eq) + + n, eq = l.findNear(y.KeyWithTs([]byte("05555"), 0), false, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05565"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("05555"), 0), false, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05555"), 0), string(n.key(l.arena))) + require.True(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("05555"), 0), true, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05545"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("05555"), 0), true, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05555"), 0), string(n.key(l.arena))) + require.True(t, eq) + + n, eq = l.findNear(y.KeyWithTs([]byte("05558"), 0), false, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05565"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("05558"), 0), false, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05565"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("05558"), 0), true, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05555"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("05558"), 0), true, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("05555"), 0), string(n.key(l.arena))) + require.False(t, eq) + + n, eq = 
l.findNear(y.KeyWithTs([]byte("09995"), 0), false, false) + require.Nil(t, n) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("09995"), 0), false, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("09995"), 0), string(n.key(l.arena))) + require.True(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("09995"), 0), true, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("09985"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("09995"), 0), true, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("09995"), 0), string(n.key(l.arena))) + require.True(t, eq) + + n, eq = l.findNear(y.KeyWithTs([]byte("59995"), 0), false, false) + require.Nil(t, n) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("59995"), 0), false, true) + require.Nil(t, n) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("59995"), 0), true, false) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("09995"), 0), string(n.key(l.arena))) + require.False(t, eq) + n, eq = l.findNear(y.KeyWithTs([]byte("59995"), 0), true, true) + require.NotNil(t, n) + require.EqualValues(t, y.KeyWithTs([]byte("09995"), 0), string(n.key(l.arena))) + require.False(t, eq) +} + +// TestIteratorNext tests a basic iteration over all nodes from the beginning. +func TestIteratorNext(t *testing.T) { + const n = 100 + l := NewSkiplist(arenaSize) + defer l.DecrRef() + it := l.NewIterator() + defer it.Close() + require.False(t, it.Valid()) + it.SeekToFirst() + require.False(t, it.Valid()) + for i := n - 1; i >= 0; i-- { + l.Put(y.KeyWithTs([]byte(fmt.Sprintf("%05d", i)), 0), + y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) + } + it.SeekToFirst() + for i := 0; i < n; i++ { + require.True(t, it.Valid()) + v := it.Value() + require.EqualValues(t, newValue(i), v.Value) + it.Next() + } + require.False(t, it.Valid()) +} + +// TestIteratorPrev tests a basic iteration over all nodes from the end. +func TestIteratorPrev(t *testing.T) { + const n = 100 + l := NewSkiplist(arenaSize) + defer l.DecrRef() + it := l.NewIterator() + defer it.Close() + require.False(t, it.Valid()) + it.SeekToFirst() + require.False(t, it.Valid()) + for i := 0; i < n; i++ { + l.Put(y.KeyWithTs([]byte(fmt.Sprintf("%05d", i)), 0), + y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) + } + it.SeekToLast() + for i := n - 1; i >= 0; i-- { + require.True(t, it.Valid()) + v := it.Value() + require.EqualValues(t, newValue(i), v.Value) + it.Prev() + } + require.False(t, it.Valid()) +} + +// TestIteratorSeek tests Seek and SeekForPrev. +func TestIteratorSeek(t *testing.T) { + const n = 100 + l := NewSkiplist(arenaSize) + defer l.DecrRef() + + it := l.NewIterator() + defer it.Close() + + require.False(t, it.Valid()) + it.SeekToFirst() + require.False(t, it.Valid()) + // 1000, 1010, 1020, ..., 1990. 
+ for i := n - 1; i >= 0; i-- { + v := i*10 + 1000 + l.Put(y.KeyWithTs([]byte(fmt.Sprintf("%05d", i*10+1000)), 0), + y.ValueStruct{Value: newValue(v), Meta: 0, UserMeta: 0}) + } + it.SeekToFirst() + require.True(t, it.Valid()) + v := it.Value() + require.EqualValues(t, "01000", v.Value) + + it.Seek(y.KeyWithTs([]byte("01000"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01000", v.Value) + + it.Seek(y.KeyWithTs([]byte("01005"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01010", v.Value) + + it.Seek(y.KeyWithTs([]byte("01010"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01010", v.Value) + + it.Seek(y.KeyWithTs([]byte("99999"), 0)) + require.False(t, it.Valid()) + + // Try SeekForPrev. + it.SeekForPrev(y.KeyWithTs([]byte("00"), 0)) + require.False(t, it.Valid()) + + it.SeekForPrev(y.KeyWithTs([]byte("01000"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01000", v.Value) + + it.SeekForPrev(y.KeyWithTs([]byte("01005"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01000", v.Value) + + it.SeekForPrev(y.KeyWithTs([]byte("01010"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01010", v.Value) + + it.SeekForPrev(y.KeyWithTs([]byte("99999"), 0)) + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, "01990", v.Value) +} + +func randomKey(rng *rand.Rand) []byte { + b := make([]byte, 8) + key := rng.Uint32() + key2 := rng.Uint32() + binary.LittleEndian.PutUint32(b, key) + binary.LittleEndian.PutUint32(b[4:], key2) + return y.KeyWithTs(b, 0) +} + +// Standard test. Some fraction is read. Some fraction is write. Writes have +// to go through mutex lock. +func BenchmarkReadWrite(b *testing.B) { + value := newValue(123) + for i := 0; i <= 10; i++ { + readFrac := float32(i) / 10.0 + b.Run(fmt.Sprintf("frac_%d", i), func(b *testing.B) { + l := NewSkiplist(int64((b.N + 1) * MaxNodeSize)) + defer l.DecrRef() + b.ResetTimer() + var count int + b.RunParallel(func(pb *testing.PB) { + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + for pb.Next() { + if rng.Float32() < readFrac { + v := l.Get(randomKey(rng)) + if v.Value != nil { + count++ + } + } else { + l.Put(randomKey(rng), y.ValueStruct{Value: value, Meta: 0, UserMeta: 0}) + } + } + }) + }) + } +} + +// Standard test. Some fraction is read. Some fraction is write. Writes have +// to go through mutex lock. +func BenchmarkReadWriteMap(b *testing.B) { + value := newValue(123) + for i := 0; i <= 10; i++ { + readFrac := float32(i) / 10.0 + b.Run(fmt.Sprintf("frac_%d", i), func(b *testing.B) { + m := make(map[string][]byte) + var mutex sync.RWMutex + b.ResetTimer() + var count int + b.RunParallel(func(pb *testing.PB) { + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + for pb.Next() { + if rand.Float32() < readFrac { + mutex.RLock() + _, ok := m[string(randomKey(rng))] + mutex.RUnlock() + if ok { + count++ + } + } else { + mutex.Lock() + m[string(randomKey(rng))] = value + mutex.Unlock() + } + } + }) + }) + } +} diff --git a/vendor/github.com/dgraph-io/badger/stream.go b/vendor/github.com/dgraph-io/badger/stream.go new file mode 100644 index 00000000..f40e5a2e --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/stream.go @@ -0,0 +1,347 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "context" + "sync" + "time" + + humanize "gx/ipfs/QmQMxG9D52TirZd9eLA37nxiNspnMRkKbyPWrVAa1gvtSy/go-humanize" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +const pageSize = 4 << 20 // 4MB + +// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up +// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key +// ranges. It does NOT send keys in lexicographical sorted order. To get keys in sorted +// order, use Iterator. +type Stream struct { + // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would + // iterate over the entire DB. + Prefix []byte + + // Number of goroutines to use for iterating over key ranges. Defaults to 16. + NumGo int + + // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can + // be used to help differentiate them from other activities. Default is "Badger.Stream". + LogPrefix string + + // ChooseKey is invoked each time a new key is encountered. Note that this is not called + // on every version of the value, only the first encountered version (i.e. the highest version + // of the value a key has). ChooseKey can be left nil to select all keys. + // + // Note: Calls to ChooseKey are concurrent. + ChooseKey func(item *Item) bool + + // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It + // is upto the caller to iterate over the versions and generate zero, one or more KVs. It + // is expected that the user would advance the iterator to go through the versions of the + // values. However, the user MUST immediately return from this function on the first encounter + // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList + // function by default. + // + // Note: Calls to KeyToList are concurrent. + KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error) + + // This is the method where Stream sends the final output. All calls to Send are done by a + // single goroutine, i.e. logic within Send method can expect single threaded execution. + Send func(*pb.KVList) error + + readTs uint64 + db *DB + rangeCh chan keyRange + kvChan chan *pb.KVList +} + +// ToList is a default implementation of KeyToList. It picks up all valid versions of the key, +// skipping over deleted or expired keys. +func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) { + list := &pb.KVList{} + for ; itr.Valid(); itr.Next() { + item := itr.Item() + if item.IsDeletedOrExpired() { + break + } + if !bytes.Equal(key, item.Key()) { + // Break out on the first encounter with another key. 
+ break + } + + valCopy, err := item.ValueCopy(nil) + if err != nil { + return nil, err + } + kv := &pb.KV{ + Key: item.KeyCopy(nil), + Value: valCopy, + UserMeta: []byte{item.UserMeta()}, + Version: item.Version(), + ExpiresAt: item.ExpiresAt(), + } + list.Kv = append(list.Kv, kv) + if st.db.opt.NumVersionsToKeep == 1 { + break + } + + if item.DiscardEarlierVersions() { + break + } + } + return list, nil +} + +// keyRange is [start, end), including start, excluding end. Do ensure that the start, +// end byte slices are owned by keyRange struct. +func (st *Stream) produceRanges(ctx context.Context) { + splits := st.db.KeySplits(st.Prefix) + start := y.SafeCopy(nil, st.Prefix) + for _, key := range splits { + st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))} + start = y.SafeCopy(nil, []byte(key)) + } + // Edge case: prefix is empty and no splits exist. In that case, we should have at least one + // keyRange output. + st.rangeCh <- keyRange{left: start} + close(st.rangeCh) +} + +// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan. +func (st *Stream) produceKVs(ctx context.Context) error { + var size int + var txn *Txn + if st.readTs > 0 { + txn = st.db.NewTransactionAt(st.readTs, false) + } else { + txn = st.db.NewTransaction(false) + } + defer txn.Discard() + + iterate := func(kr keyRange) error { + iterOpts := DefaultIteratorOptions + iterOpts.AllVersions = true + iterOpts.Prefix = st.Prefix + iterOpts.PrefetchValues = false + itr := txn.NewIterator(iterOpts) + defer itr.Close() + + outList := new(pb.KVList) + var prevKey []byte + for itr.Seek(kr.left); itr.Valid(); { + // it.Valid would only return true for keys with the provided Prefix in iterOpts. + item := itr.Item() + if bytes.Equal(item.Key(), prevKey) { + itr.Next() + continue + } + prevKey = append(prevKey[:0], item.Key()...) + + // Check if we reached the end of the key range. + if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 { + break + } + // Check if we should pick this key. + if st.ChooseKey != nil && !st.ChooseKey(item) { + continue + } + + // Now convert to key value. + list, err := st.KeyToList(item.KeyCopy(nil), itr) + if err != nil { + return err + } + if list == nil || len(list.Kv) == 0 { + continue + } + outList.Kv = append(outList.Kv, list.Kv...) + size += list.Size() + if size >= pageSize { + st.kvChan <- outList + outList = new(pb.KVList) + size = 0 + } + } + if len(outList.Kv) > 0 { + st.kvChan <- outList + } + return nil + } + + for { + select { + case kr, ok := <-st.rangeCh: + if !ok { + // Done with the keys. + return nil + } + if err := iterate(kr); err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (st *Stream) streamKVs(ctx context.Context) error { + var count int + var bytesSent uint64 + t := time.NewTicker(time.Second) + defer t.Stop() + now := time.Now() + + slurp := func(batch *pb.KVList) error { + loop: + for { + select { + case kvs, ok := <-st.kvChan: + if !ok { + break loop + } + y.AssertTrue(kvs != nil) + batch.Kv = append(batch.Kv, kvs.Kv...) 
+ default: + break loop + } + } + sz := uint64(batch.Size()) + bytesSent += sz + count += len(batch.Kv) + t := time.Now() + if err := st.Send(batch); err != nil { + return err + } + st.db.opt.Infof("%s Created batch of size: %s in %s.\n", + st.LogPrefix, humanize.Bytes(sz), time.Since(t)) + return nil + } + +outer: + for { + var batch *pb.KVList + select { + case <-ctx.Done(): + return ctx.Err() + + case <-t.C: + dur := time.Since(now) + durSec := uint64(dur.Seconds()) + if durSec == 0 { + continue + } + speed := bytesSent / durSec + st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix, + y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed)) + + case kvs, ok := <-st.kvChan: + if !ok { + break outer + } + y.AssertTrue(kvs != nil) + batch = kvs + if err := slurp(batch); err != nil { + return err + } + } + } + + st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count) + return nil +} + +// Orchestrate runs Stream. It picks up ranges from the SSTables, then runs NumGo number of +// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single +// goroutine to pick these lists, batch them up further and send to Output.Send. Orchestrate also +// spits logs out to Infof, using provided LogPrefix. Note that all calls to Output.Send +// are serial. In case any of these steps encounter an error, Orchestrate would stop execution and +// return that error. Orchestrate can be called multiple times, but in serial order. +func (st *Stream) Orchestrate(ctx context.Context) error { + st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists. + + // kvChan should only have a small capacity to ensure that we don't buffer up too much data if + // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each + // KVList. To get around 64MB buffer, we can set the channel size to 16. + st.kvChan = make(chan *pb.KVList, 16) + + if st.KeyToList == nil { + st.KeyToList = st.ToList + } + + // Picks up ranges from Badger, and sends them to rangeCh. + go st.produceRanges(ctx) + + errCh := make(chan error, 1) // Stores error by consumeKeys. + var wg sync.WaitGroup + for i := 0; i < st.NumGo; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan. + if err := st.produceKVs(ctx); err != nil { + select { + case errCh <- err: + default: + } + } + }() + } + + // Pick up key-values from kvChan and send to stream. + kvErr := make(chan error, 1) + go func() { + // Picks up KV lists from kvChan, and sends them to Output. + kvErr <- st.streamKVs(ctx) + }() + wg.Wait() // Wait for produceKVs to be over. + close(st.kvChan) // Now we can close kvChan. + + select { + case err := <-errCh: // Check error from produceKVs. + return err + default: + } + + // Wait for key streaming to be over. + err := <-kvErr + return err +} + +func (db *DB) newStream() *Stream { + return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"} +} + +// NewStream creates a new Stream. +func (db *DB) NewStream() *Stream { + if db.opt.managedTxns { + panic("This API can not be called in managed mode.") + } + return db.newStream() +} + +// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB. 
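+//
+// An illustrative sketch of the flow (mirroring TestStream in stream_test.go):
+// create the stream at a read timestamp, point Send at a caller-owned sink,
+// then run Orchestrate:
+//
+//	stream := db.NewStreamAt(readTs)
+//	stream.LogPrefix = "Example"
+//	stream.Send = func(list *pb.KVList) error {
+//		kvs = append(kvs, list.Kv...) // kvs is a caller-owned slice
+//		return nil
+//	}
+//	err := stream.Orchestrate(ctx)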
+func (db *DB) NewStreamAt(readTs uint64) *Stream { + if !db.opt.managedTxns { + panic("This API can only be called in managed mode.") + } + stream := db.newStream() + stream.readTs = readTs + return stream +} diff --git a/vendor/github.com/dgraph-io/badger/stream_test.go b/vendor/github.com/dgraph-io/badger/stream_test.go new file mode 100644 index 00000000..cdb1ec56 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/stream_test.go @@ -0,0 +1,169 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "context" + "fmt" + "io/ioutil" + "math" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + bpb "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/pb" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +func openManaged(dir string) (*DB, error) { + opt := DefaultOptions + opt.Dir = dir + opt.ValueDir = dir + + return OpenManaged(opt) +} + +func keyWithPrefix(prefix string, k int) []byte { + return []byte(fmt.Sprintf("%s-%d", prefix, k)) +} + +func keyToInt(k []byte) (string, int) { + splits := strings.Split(string(k), "-") + key, err := strconv.Atoi(splits[1]) + y.Check(err) + return splits[0], key +} + +func value(k int) []byte { + return []byte(fmt.Sprintf("%08d", k)) +} + +type collector struct { + kv []*bpb.KV +} + +func (c *collector) Send(list *bpb.KVList) error { + c.kv = append(c.kv, list.Kv...) + return nil +} + +var ctxb = context.Background() + +func TestStream(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + db, err := openManaged(dir) + require.NoError(t, err) + + var count int + for _, prefix := range []string{"p0", "p1", "p2"} { + txn := db.NewTransactionAt(math.MaxUint64, true) + for i := 1; i <= 100; i++ { + require.NoError(t, txn.Set(keyWithPrefix(prefix, i), value(i))) + count++ + } + require.NoError(t, txn.CommitAt(5, nil)) + } + + stream := db.NewStreamAt(math.MaxUint64) + stream.LogPrefix = "Testing" + c := &collector{} + stream.Send = func(list *bpb.KVList) error { + return c.Send(list) + } + + // Test case 1. Retrieve everything. + err = stream.Orchestrate(ctxb) + require.NoError(t, err) + require.Equal(t, 300, len(c.kv), "Expected 300. Got: %d", len(c.kv)) + + m := make(map[string]int) + for _, kv := range c.kv { + prefix, ki := keyToInt(kv.Key) + expected := value(ki) + require.Equal(t, expected, kv.Value) + m[prefix]++ + } + require.Equal(t, 3, len(m)) + for pred, count := range m { + require.Equal(t, 100, count, "Count mismatch for pred: %s", pred) + } + + // Test case 2. Retrieve only 1 predicate. + stream.Prefix = []byte("p1") + c.kv = c.kv[:0] + err = stream.Orchestrate(ctxb) + require.NoError(t, err) + require.Equal(t, 100, len(c.kv), "Expected 100. 
Got: %d", len(c.kv)) + + m = make(map[string]int) + for _, kv := range c.kv { + prefix, ki := keyToInt(kv.Key) + expected := value(ki) + require.Equal(t, expected, kv.Value) + m[prefix]++ + } + require.Equal(t, 1, len(m)) + for pred, count := range m { + require.Equal(t, 100, count, "Count mismatch for pred: %s", pred) + } + + // Test case 3. Retrieve select keys within the predicate. + c.kv = c.kv[:0] + stream.ChooseKey = func(item *Item) bool { + _, k := keyToInt(item.Key()) + return k%2 == 0 + } + err = stream.Orchestrate(ctxb) + require.NoError(t, err) + require.Equal(t, 50, len(c.kv), "Expected 50. Got: %d", len(c.kv)) + + m = make(map[string]int) + for _, kv := range c.kv { + prefix, ki := keyToInt(kv.Key) + expected := value(ki) + require.Equal(t, expected, kv.Value) + m[prefix]++ + } + require.Equal(t, 1, len(m)) + for pred, count := range m { + require.Equal(t, 50, count, "Count mismatch for pred: %s", pred) + } + + // Test case 4. Retrieve select keys from all predicates. + c.kv = c.kv[:0] + stream.Prefix = []byte{} + err = stream.Orchestrate(ctxb) + require.NoError(t, err) + require.Equal(t, 150, len(c.kv), "Expected 150. Got: %d", len(c.kv)) + + m = make(map[string]int) + for _, kv := range c.kv { + prefix, ki := keyToInt(kv.Key) + expected := value(ki) + require.Equal(t, expected, kv.Value) + m[prefix]++ + } + require.Equal(t, 3, len(m)) + for pred, count := range m { + require.Equal(t, 50, count, "Count mismatch for pred: %s", pred) + } +} diff --git a/vendor/github.com/dgraph-io/badger/structs.go b/vendor/github.com/dgraph-io/badger/structs.go new file mode 100644 index 00000000..eaeeb1d4 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/structs.go @@ -0,0 +1,132 @@ +package badger + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +type valuePointer struct { + Fid uint32 + Len uint32 + Offset uint32 +} + +func (p valuePointer) Less(o valuePointer) bool { + if p.Fid != o.Fid { + return p.Fid < o.Fid + } + if p.Offset != o.Offset { + return p.Offset < o.Offset + } + return p.Len < o.Len +} + +func (p valuePointer) IsZero() bool { + return p.Fid == 0 && p.Offset == 0 && p.Len == 0 +} + +const vptrSize = 12 + +// Encode encodes Pointer into byte buffer. +func (p valuePointer) Encode(b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], p.Fid) + binary.BigEndian.PutUint32(b[4:8], p.Len) + binary.BigEndian.PutUint32(b[8:12], p.Offset) + return b[:vptrSize] +} + +func (p *valuePointer) Decode(b []byte) { + p.Fid = binary.BigEndian.Uint32(b[:4]) + p.Len = binary.BigEndian.Uint32(b[4:8]) + p.Offset = binary.BigEndian.Uint32(b[8:12]) +} + +// header is used in value log as a header before Entry. +type header struct { + klen uint32 + vlen uint32 + expiresAt uint64 + meta byte + userMeta byte +} + +const ( + headerBufSize = 18 +) + +func (h header) Encode(out []byte) { + y.AssertTrue(len(out) >= headerBufSize) + binary.BigEndian.PutUint32(out[0:4], h.klen) + binary.BigEndian.PutUint32(out[4:8], h.vlen) + binary.BigEndian.PutUint64(out[8:16], h.expiresAt) + out[16] = h.meta + out[17] = h.userMeta +} + +// Decodes h from buf. +func (h *header) Decode(buf []byte) { + h.klen = binary.BigEndian.Uint32(buf[0:4]) + h.vlen = binary.BigEndian.Uint32(buf[4:8]) + h.expiresAt = binary.BigEndian.Uint64(buf[8:16]) + h.meta = buf[16] + h.userMeta = buf[17] +} + +// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by the user to set data. 
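+//
+// When written to the value log (see encodeEntry below), an Entry is laid out
+// as an 18-byte header (klen, vlen, expiresAt, meta, userMeta), followed by
+// the key, the value, and a 4-byte CRC32 (Castagnoli) over all of the above.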
+type Entry struct { + Key []byte + Value []byte + UserMeta byte + ExpiresAt uint64 // time.Unix + meta byte + + // Fields maintained internally. + offset uint32 +} + +func (e *Entry) estimateSize(threshold int) int { + if len(e.Value) < threshold { + return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta + } + return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas. +} + +// Encodes e to buf. Returns number of bytes written. +func encodeEntry(e *Entry, buf *bytes.Buffer) (int, error) { + h := header{ + klen: uint32(len(e.Key)), + vlen: uint32(len(e.Value)), + expiresAt: e.ExpiresAt, + meta: e.meta, + userMeta: e.UserMeta, + } + + var headerEnc [headerBufSize]byte + h.Encode(headerEnc[:]) + + hash := crc32.New(y.CastagnoliCrcTable) + + buf.Write(headerEnc[:]) + hash.Write(headerEnc[:]) + + buf.Write(e.Key) + hash.Write(e.Key) + + buf.Write(e.Value) + hash.Write(e.Value) + + var crcBuf [crc32.Size]byte + binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32()) + buf.Write(crcBuf[:]) + + return len(headerEnc) + len(e.Key) + len(e.Value) + len(crcBuf), nil +} + +func (e Entry) print(prefix string) { + fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d", + prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value)) +} diff --git a/vendor/github.com/dgraph-io/badger/table/README.md b/vendor/github.com/dgraph-io/badger/table/README.md new file mode 100644 index 00000000..5d33e96a --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/table/README.md @@ -0,0 +1,51 @@ +# BenchmarkRead + +``` +$ go test -bench Read$ -count 3 + +Size of table: 105843444 +BenchmarkRead-8 3 343846914 ns/op +BenchmarkRead-8 3 351790907 ns/op +BenchmarkRead-8 3 351762823 ns/op +``` + +Size of table is 105,843,444 bytes, which is ~101M. + +The rate is ~287M/s which matches our read speed. This is using mmap. + +To read a 64M table, this would take ~0.22s, which is negligible. + +``` +$ go test -bench BenchmarkReadAndBuild -count 3 + +BenchmarkReadAndBuild-8 1 2341034225 ns/op +BenchmarkReadAndBuild-8 1 2346349671 ns/op +BenchmarkReadAndBuild-8 1 2364064576 ns/op +``` + +The rate is ~43M/s. To build a ~64M table, this would take ~1.5s. Note that this +does NOT include the flushing of the table to disk. All we are doing above is +to read one table (mmaped) and write one table in memory. + +The table building takes 1.5-0.22 ~ 1.3s. + +If we are writing out up to 10 tables, this would take 1.5*10 ~ 15s, and ~13s +is spent building the tables. + +When running populate, building one table in memory tends to take ~1.5s to ~2.5s +on my system. Where does this overhead come from? Let's investigate the merging. + +Below, we merge 5 tables. The total size remains unchanged at ~101M. + +``` +$ go test -bench ReadMerged -count 3 +BenchmarkReadMerged-8 1 1321190264 ns/op +BenchmarkReadMerged-8 1 1296958737 ns/op +BenchmarkReadMerged-8 1 1314381178 ns/op +``` + +The rate is ~76M/s. To build a 64M table, this would take ~0.84s. The writing +takes ~1.3s as we saw above. So in total, we expect around 0.84+1.3 ~ 2.1s. +This roughly matches what we observe when running populate. There might be +some additional overhead due to the concurrent writes going on, in flushing the +table to disk. Also, the tables tend to be slightly bigger than 64M/s. 
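+
+For reference, the rates above are simply table size divided by time per op;
+e.g. for BenchmarkRead, ~101M divided by ~0.349s per op gives ~289M/s, in line
+with the ~287M/s figure quoted at the top.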
\ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/table/builder.go b/vendor/github.com/dgraph-io/badger/table/builder.go new file mode 100644 index 00000000..a19c6589 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/table/builder.go @@ -0,0 +1,235 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package table + +import ( + "bytes" + "encoding/binary" + "io" + "math" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmWaLViWQF8jgyoLLqqcSrnp6dJpHESiJfzor1vrfDyTZf/bbloom" +) + +var ( + restartInterval = 100 // Might want to change this to be based on total size instead of numKeys. +) + +func newBuffer(sz int) *bytes.Buffer { + b := new(bytes.Buffer) + b.Grow(sz) + return b +} + +type header struct { + plen uint16 // Overlap with base key. + klen uint16 // Length of the diff. + vlen uint16 // Length of value. + prev uint32 // Offset for the previous key-value pair. The offset is relative to block base offset. +} + +// Encode encodes the header. +func (h header) Encode(b []byte) { + binary.BigEndian.PutUint16(b[0:2], h.plen) + binary.BigEndian.PutUint16(b[2:4], h.klen) + binary.BigEndian.PutUint16(b[4:6], h.vlen) + binary.BigEndian.PutUint32(b[6:10], h.prev) +} + +// Decode decodes the header. +func (h *header) Decode(buf []byte) int { + h.plen = binary.BigEndian.Uint16(buf[0:2]) + h.klen = binary.BigEndian.Uint16(buf[2:4]) + h.vlen = binary.BigEndian.Uint16(buf[4:6]) + h.prev = binary.BigEndian.Uint32(buf[6:10]) + return h.Size() +} + +// Size returns size of the header. Currently it's just a constant. +func (h header) Size() int { return 10 } + +// Builder is used in building a table. +type Builder struct { + counter int // Number of keys written for the current block. + + // Typically tens or hundreds of meg. This is for one single file. + buf *bytes.Buffer + + baseKey []byte // Base key for the current block. + baseOffset uint32 // Offset for the current block. + + restarts []uint32 // Base offsets of every block. + + // Tracks offset for the previous key-value pair. Offset is relative to block base offset. + prevOffset uint32 + + keyBuf *bytes.Buffer + keyCount int +} + +// NewTableBuilder makes a new TableBuilder. +func NewTableBuilder() *Builder { + return &Builder{ + keyBuf: newBuffer(1 << 20), + buf: newBuffer(1 << 20), + prevOffset: math.MaxUint32, // Used for the first element! + } +} + +// Close closes the TableBuilder. +func (b *Builder) Close() {} + +// Empty returns whether it's empty. +func (b *Builder) Empty() bool { return b.buf.Len() == 0 } + +// keyDiff returns a suffix of newKey that is different from b.baseKey. +func (b Builder) keyDiff(newKey []byte) []byte { + var i int + for i = 0; i < len(newKey) && i < len(b.baseKey); i++ { + if newKey[i] != b.baseKey[i] { + break + } + } + return newKey[i:] +} + +func (b *Builder) addHelper(key []byte, v y.ValueStruct) { + // Add key to bloom filter. 
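+	// (More precisely, the key is buffered here, length-prefixed and with its
+	// timestamp stripped, in keyBuf; Finish later replays keyBuf to build the
+	// actual bloom filter once keyCount is known.)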
+ if len(key) > 0 { + var klen [2]byte + keyNoTs := y.ParseKey(key) + binary.BigEndian.PutUint16(klen[:], uint16(len(keyNoTs))) + b.keyBuf.Write(klen[:]) + b.keyBuf.Write(keyNoTs) + b.keyCount++ + } + + // diffKey stores the difference of key with baseKey. + var diffKey []byte + if len(b.baseKey) == 0 { + // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful + // and will have to make copies of keys every time they add to builder, which is even worse. + b.baseKey = append(b.baseKey[:0], key...) + diffKey = key + } else { + diffKey = b.keyDiff(key) + } + + h := header{ + plen: uint16(len(key) - len(diffKey)), + klen: uint16(len(diffKey)), + vlen: uint16(v.EncodedSize()), + prev: b.prevOffset, // prevOffset is the location of the last key-value added. + } + b.prevOffset = uint32(b.buf.Len()) - b.baseOffset // Remember current offset for the next Add call. + + // Layout: header, diffKey, value. + var hbuf [10]byte + h.Encode(hbuf[:]) + b.buf.Write(hbuf[:]) + b.buf.Write(diffKey) // We only need to store the key difference. + + v.EncodeTo(b.buf) + b.counter++ // Increment number of keys added for this current block. +} + +func (b *Builder) finishBlock() { + // When we are at the end of the block and Valid=false, and the user wants to do a Prev, + // we need a dummy header to tell us the offset of the previous key-value pair. + b.addHelper([]byte{}, y.ValueStruct{}) +} + +// Add adds a key-value pair to the block. +// If doNotRestart is true, we will not restart even if b.counter >= restartInterval. +func (b *Builder) Add(key []byte, value y.ValueStruct) error { + if b.counter >= restartInterval { + b.finishBlock() + // Start a new block. Initialize the block. + b.restarts = append(b.restarts, uint32(b.buf.Len())) + b.counter = 0 + b.baseKey = []byte{} + b.baseOffset = uint32(b.buf.Len()) + b.prevOffset = math.MaxUint32 // First key-value pair of block has header.prev=MaxInt. + } + b.addHelper(key, value) + return nil // Currently, there is no meaningful error. +} + +// TODO: vvv this was the comment on ReachedCapacity. +// FinalSize returns the *rough* final size of the array, counting the header which is not yet written. +// TODO: Look into why there is a discrepancy. I suspect it is because of Write(empty, empty) +// at the end. The diff can vary. + +// ReachedCapacity returns true if we... roughly (?) reached capacity? +func (b *Builder) ReachedCapacity(cap int64) bool { + estimateSz := b.buf.Len() + 8 /* empty header */ + 4*len(b.restarts) + 8 // 8 = end of buf offset + len(restarts). + return int64(estimateSz) > cap +} + +// blockIndex generates the block index for the table. +// It is mainly a list of all the block base offsets. +func (b *Builder) blockIndex() []byte { + // Store the end offset, so we know the length of the final block. + b.restarts = append(b.restarts, uint32(b.buf.Len())) + + // Add 4 because we want to write out number of restarts at the end. + sz := 4*len(b.restarts) + 4 + out := make([]byte, sz) + buf := out + for _, r := range b.restarts { + binary.BigEndian.PutUint32(buf[:4], r) + buf = buf[4:] + } + binary.BigEndian.PutUint32(buf[:4], uint32(len(b.restarts))) + return out +} + +// Finish finishes the table by appending the index. 
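+// The returned byte slice is laid out as: data blocks | block index (the
+// restart offsets plus a trailing count, 4 bytes each) | JSON-marshalled bloom
+// filter | 4-byte bloom-filter length.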
+func (b *Builder) Finish() []byte { + bf, _ := bbloom.New(float64(b.keyCount), 0.01) + var klen [2]byte + key := make([]byte, 1024) + for { + if _, err := b.keyBuf.Read(klen[:]); err == io.EOF { + break + } else if err != nil { + y.Check(err) + } + kl := int(binary.BigEndian.Uint16(klen[:])) + if cap(key) < kl { + key = make([]byte, 2*int(kl)) // 2 * uint16 will overflow + } + key = key[:kl] + y.Check2(b.keyBuf.Read(key)) + bf.Add(key) + } + + b.finishBlock() // This will never start a new block. + index := b.blockIndex() + b.buf.Write(index) + + // Write bloom filter. + bdata, _ := bf.JSONMarshal() + n, err := b.buf.Write(bdata) + y.Check(err) + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], uint32(n)) + b.buf.Write(buf[:]) + + return b.buf.Bytes() +} diff --git a/vendor/github.com/dgraph-io/badger/table/iterator.go b/vendor/github.com/dgraph-io/badger/table/iterator.go new file mode 100644 index 00000000..28feb99e --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/table/iterator.go @@ -0,0 +1,539 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package table + +import ( + "bytes" + "io" + "math" + "sort" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +type blockIterator struct { + data []byte + pos uint32 + err error + baseKey []byte + + key []byte + val []byte + init bool + + last header // The last header we saw. +} + +func (itr *blockIterator) Reset() { + itr.pos = 0 + itr.err = nil + itr.baseKey = []byte{} + itr.key = []byte{} + itr.val = []byte{} + itr.init = false + itr.last = header{} +} + +func (itr *blockIterator) Init() { + if !itr.init { + itr.Next() + } +} + +func (itr *blockIterator) Valid() bool { + return itr != nil && itr.err == nil +} + +func (itr *blockIterator) Error() error { + return itr.err +} + +func (itr *blockIterator) Close() {} + +var ( + origin = 0 + current = 1 +) + +// Seek brings us to the first block element that is >= input key. +func (itr *blockIterator) Seek(key []byte, whence int) { + itr.err = nil + + switch whence { + case origin: + itr.Reset() + case current: + } + + var done bool + for itr.Init(); itr.Valid(); itr.Next() { + k := itr.Key() + if y.CompareKeys(k, key) >= 0 { + // We are done as k is >= key. + done = true + break + } + } + if !done { + itr.err = io.EOF + } +} + +func (itr *blockIterator) SeekToFirst() { + itr.err = nil + itr.Init() +} + +// SeekToLast brings us to the last element. Valid should return true. +func (itr *blockIterator) SeekToLast() { + itr.err = nil + for itr.Init(); itr.Valid(); itr.Next() { + } + itr.Prev() +} + +// parseKV would allocate a new byte slice for key and for value. +func (itr *blockIterator) parseKV(h header) { + if cap(itr.key) < int(h.plen+h.klen) { + sz := int(h.plen) + int(h.klen) // Convert to int before adding to avoid uint16 overflow. 
+ itr.key = make([]byte, 2*sz) + } + itr.key = itr.key[:h.plen+h.klen] + copy(itr.key, itr.baseKey[:h.plen]) + copy(itr.key[h.plen:], itr.data[itr.pos:itr.pos+uint32(h.klen)]) + itr.pos += uint32(h.klen) + + if itr.pos+uint32(h.vlen) > uint32(len(itr.data)) { + itr.err = errors.Errorf("Value exceeded size of block: %d %d %d %d %v", + itr.pos, h.klen, h.vlen, len(itr.data), h) + return + } + itr.val = y.SafeCopy(itr.val, itr.data[itr.pos:itr.pos+uint32(h.vlen)]) + itr.pos += uint32(h.vlen) +} + +func (itr *blockIterator) Next() { + itr.init = true + itr.err = nil + if itr.pos >= uint32(len(itr.data)) { + itr.err = io.EOF + return + } + + var h header + itr.pos += uint32(h.Decode(itr.data[itr.pos:])) + itr.last = h // Store the last header. + + if h.klen == 0 && h.plen == 0 { + // Last entry in the table. + itr.err = io.EOF + return + } + + // Populate baseKey if it isn't set yet. This would only happen for the first Next. + if len(itr.baseKey) == 0 { + // This should be the first Next() for this block. Hence, prefix length should be zero. + y.AssertTrue(h.plen == 0) + itr.baseKey = itr.data[itr.pos : itr.pos+uint32(h.klen)] + } + itr.parseKV(h) +} + +func (itr *blockIterator) Prev() { + if !itr.init { + return + } + itr.err = nil + if itr.last.prev == math.MaxUint32 { + // This is the first element of the block! + itr.err = io.EOF + itr.pos = 0 + return + } + + // Move back using current header's prev. + itr.pos = itr.last.prev + + var h header + y.AssertTruef(itr.pos < uint32(len(itr.data)), "%d %d", itr.pos, len(itr.data)) + itr.pos += uint32(h.Decode(itr.data[itr.pos:])) + itr.parseKV(h) + itr.last = h +} + +func (itr *blockIterator) Key() []byte { + if itr.err != nil { + return nil + } + return itr.key +} + +func (itr *blockIterator) Value() []byte { + if itr.err != nil { + return nil + } + return itr.val +} + +// Iterator is an iterator for a Table. +type Iterator struct { + t *Table + bpos int + bi *blockIterator + err error + + // Internally, Iterator is bidirectional. However, we only expose the + // unidirectional functionality for now. + reversed bool +} + +// NewIterator returns a new iterator of the Table +func (t *Table) NewIterator(reversed bool) *Iterator { + t.IncrRef() // Important. + ti := &Iterator{t: t, reversed: reversed} + ti.next() + return ti +} + +// Close closes the iterator (and it must be called). 
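+//
+// A typical forward scan over a table looks roughly like this (sketch; t is
+// an already opened *Table):
+//
+//	it := t.NewIterator(false)
+//	defer it.Close()
+//	for it.Rewind(); it.Valid(); it.Next() {
+//		k, v := it.Key(), it.Value()
+//		_, _ = k, v
+//	}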
+func (itr *Iterator) Close() error { + return itr.t.DecrRef() +} + +func (itr *Iterator) reset() { + itr.bpos = 0 + itr.err = nil +} + +// Valid follows the y.Iterator interface +func (itr *Iterator) Valid() bool { + return itr.err == nil +} + +func (itr *Iterator) seekToFirst() { + numBlocks := len(itr.t.blockIndex) + if numBlocks == 0 { + itr.err = io.EOF + return + } + itr.bpos = 0 + block, err := itr.t.block(itr.bpos) + if err != nil { + itr.err = err + return + } + itr.bi = block.NewIterator() + itr.bi.SeekToFirst() + itr.err = itr.bi.Error() +} + +func (itr *Iterator) seekToLast() { + numBlocks := len(itr.t.blockIndex) + if numBlocks == 0 { + itr.err = io.EOF + return + } + itr.bpos = numBlocks - 1 + block, err := itr.t.block(itr.bpos) + if err != nil { + itr.err = err + return + } + itr.bi = block.NewIterator() + itr.bi.SeekToLast() + itr.err = itr.bi.Error() +} + +func (itr *Iterator) seekHelper(blockIdx int, key []byte) { + itr.bpos = blockIdx + block, err := itr.t.block(blockIdx) + if err != nil { + itr.err = err + return + } + itr.bi = block.NewIterator() + itr.bi.Seek(key, origin) + itr.err = itr.bi.Error() +} + +// seekFrom brings us to a key that is >= input key. +func (itr *Iterator) seekFrom(key []byte, whence int) { + itr.err = nil + switch whence { + case origin: + itr.reset() + case current: + } + + idx := sort.Search(len(itr.t.blockIndex), func(idx int) bool { + ko := itr.t.blockIndex[idx] + return y.CompareKeys(ko.key, key) > 0 + }) + if idx == 0 { + // The smallest key in our table is already strictly > key. We can return that. + // This is like a SeekToFirst. + itr.seekHelper(0, key) + return + } + + // block[idx].smallest is > key. + // Since idx>0, we know block[idx-1].smallest is <= key. + // There are two cases. + // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first + // element of block[idx]. + // 2) Some element in block[idx-1] is >= key. We should go to that element. + itr.seekHelper(idx-1, key) + if itr.err == io.EOF { + // Case 1. Need to visit block[idx]. + if idx == len(itr.t.blockIndex) { + // If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table. + // There's nothing we can do. Valid() should return false as we seek to end of table. + return + } + // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst. + itr.seekHelper(idx, key) + } + // Case 2: No need to do anything. We already did the seek in block[idx-1]. +} + +// seek will reset iterator and seek to >= key. +func (itr *Iterator) seek(key []byte) { + itr.seekFrom(key, origin) +} + +// seekForPrev will reset iterator and seek to <= key. +func (itr *Iterator) seekForPrev(key []byte) { + // TODO: Optimize this. We shouldn't have to take a Prev step. 
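+	// For now: seek to the smallest key >= key, and if that is not an exact
+	// match, step back once so we land on the largest key <= key.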
+ itr.seekFrom(key, origin) + if !bytes.Equal(itr.Key(), key) { + itr.prev() + } +} + +func (itr *Iterator) next() { + itr.err = nil + + if itr.bpos >= len(itr.t.blockIndex) { + itr.err = io.EOF + return + } + + if itr.bi == nil { + block, err := itr.t.block(itr.bpos) + if err != nil { + itr.err = err + return + } + itr.bi = block.NewIterator() + itr.bi.SeekToFirst() + itr.err = itr.bi.Error() + return + } + + itr.bi.Next() + if !itr.bi.Valid() { + itr.bpos++ + itr.bi = nil + itr.next() + return + } +} + +func (itr *Iterator) prev() { + itr.err = nil + if itr.bpos < 0 { + itr.err = io.EOF + return + } + + if itr.bi == nil { + block, err := itr.t.block(itr.bpos) + if err != nil { + itr.err = err + return + } + itr.bi = block.NewIterator() + itr.bi.SeekToLast() + itr.err = itr.bi.Error() + return + } + + itr.bi.Prev() + if !itr.bi.Valid() { + itr.bpos-- + itr.bi = nil + itr.prev() + return + } +} + +// Key follows the y.Iterator interface +func (itr *Iterator) Key() []byte { + return itr.bi.Key() +} + +// Value follows the y.Iterator interface +func (itr *Iterator) Value() (ret y.ValueStruct) { + ret.Decode(itr.bi.Value()) + return +} + +// Next follows the y.Iterator interface +func (itr *Iterator) Next() { + if !itr.reversed { + itr.next() + } else { + itr.prev() + } +} + +// Rewind follows the y.Iterator interface +func (itr *Iterator) Rewind() { + if !itr.reversed { + itr.seekToFirst() + } else { + itr.seekToLast() + } +} + +// Seek follows the y.Iterator interface +func (itr *Iterator) Seek(key []byte) { + if !itr.reversed { + itr.seek(key) + } else { + itr.seekForPrev(key) + } +} + +// ConcatIterator concatenates the sequences defined by several iterators. (It only works with +// TableIterators, probably just because it's faster to not be so generic.) +type ConcatIterator struct { + idx int // Which iterator is active now. + cur *Iterator + iters []*Iterator // Corresponds to tables. + tables []*Table // Disregarding reversed, this is in ascending order. + reversed bool +} + +// NewConcatIterator creates a new concatenated iterator +func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator { + iters := make([]*Iterator, len(tbls)) + for i := 0; i < len(tbls); i++ { + iters[i] = tbls[i].NewIterator(reversed) + } + return &ConcatIterator{ + reversed: reversed, + iters: iters, + tables: tbls, + idx: -1, // Not really necessary because s.it.Valid()=false, but good to have. + } +} + +func (s *ConcatIterator) setIdx(idx int) { + s.idx = idx + if idx < 0 || idx >= len(s.iters) { + s.cur = nil + } else { + s.cur = s.iters[s.idx] + } +} + +// Rewind implements y.Interface +func (s *ConcatIterator) Rewind() { + if len(s.iters) == 0 { + return + } + if !s.reversed { + s.setIdx(0) + } else { + s.setIdx(len(s.iters) - 1) + } + s.cur.Rewind() +} + +// Valid implements y.Interface +func (s *ConcatIterator) Valid() bool { + return s.cur != nil && s.cur.Valid() +} + +// Key implements y.Interface +func (s *ConcatIterator) Key() []byte { + return s.cur.Key() +} + +// Value implements y.Interface +func (s *ConcatIterator) Value() y.ValueStruct { + return s.cur.Value() +} + +// Seek brings us to element >= key if reversed is false. Otherwise, <= key. 
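+//
+// For example, with three tables covering keys "keya...", "keyb..." and
+// "keyc..." (see TestConcatIterator), a forward Seek on "keyb9999b" lands on
+// "keyc0000", while a reversed Seek on the same key lands on "keyb9999".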
+func (s *ConcatIterator) Seek(key []byte) { + var idx int + if !s.reversed { + idx = sort.Search(len(s.tables), func(i int) bool { + return y.CompareKeys(s.tables[i].Biggest(), key) >= 0 + }) + } else { + n := len(s.tables) + idx = n - 1 - sort.Search(n, func(i int) bool { + return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0 + }) + } + if idx >= len(s.tables) || idx < 0 { + s.setIdx(-1) + return + } + // For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the + // previous table cannot possibly contain key. + s.setIdx(idx) + s.cur.Seek(key) +} + +// Next advances our concat iterator. +func (s *ConcatIterator) Next() { + s.cur.Next() + if s.cur.Valid() { + // Nothing to do. Just stay with the current table. + return + } + for { // In case there are empty tables. + if !s.reversed { + s.setIdx(s.idx + 1) + } else { + s.setIdx(s.idx - 1) + } + if s.cur == nil { + // End of list. Valid will become false. + return + } + s.cur.Rewind() + if s.cur.Valid() { + break + } + } +} + +// Close implements y.Interface. +func (s *ConcatIterator) Close() error { + for _, it := range s.iters { + if err := it.Close(); err != nil { + return errors.Wrap(err, "ConcatIterator") + } + } + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/table/table.go b/vendor/github.com/dgraph-io/badger/table/table.go new file mode 100644 index 00000000..f8119346 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/table/table.go @@ -0,0 +1,356 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package table + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" + "gx/ipfs/QmWaLViWQF8jgyoLLqqcSrnp6dJpHESiJfzor1vrfDyTZf/bbloom" +) + +const fileSuffix = ".sst" + +type keyOffset struct { + key []byte + offset int + len int +} + +// TableInterface is useful for testing. +type TableInterface interface { + Smallest() []byte + Biggest() []byte + DoesNotHave(key []byte) bool +} + +// Table represents a loaded table file with the info we have about it +type Table struct { + sync.Mutex + + fd *os.File // Own fd. + tableSize int // Initialized in OpenTable, using fd.Stat(). + + blockIndex []keyOffset + ref int32 // For file garbage collection. Atomic. + + loadingMode options.FileLoadingMode + mmap []byte // Memory mapped. + + // The following are initialized once and const. + smallest, biggest []byte // Smallest and largest keys. 
+ id uint64 // file id, part of filename + + bf bbloom.Bloom + + Checksum []byte +} + +// IncrRef increments the refcount (having to do with whether the file should be deleted) +func (t *Table) IncrRef() { + atomic.AddInt32(&t.ref, 1) +} + +// DecrRef decrements the refcount and possibly deletes the table +func (t *Table) DecrRef() error { + newRef := atomic.AddInt32(&t.ref, -1) + if newRef == 0 { + // We can safely delete this file, because for all the current files, we always have + // at least one reference pointing to them. + + // It's necessary to delete windows files + if t.loadingMode == options.MemoryMap { + y.Munmap(t.mmap) + } + if err := t.fd.Truncate(0); err != nil { + // This is very important to let the FS know that the file is deleted. + return err + } + filename := t.fd.Name() + if err := t.fd.Close(); err != nil { + return err + } + if err := os.Remove(filename); err != nil { + return err + } + } + return nil +} + +type block struct { + offset int + data []byte +} + +func (b block) NewIterator() *blockIterator { + return &blockIterator{data: b.data} +} + +// OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function +// entry. Returns a table with one reference count on it (decrementing which may delete the file! +// -- consider t.Close() instead). The fd has to writeable because we call Truncate on it before +// deleting. +func OpenTable(fd *os.File, mode options.FileLoadingMode, cksum []byte) (*Table, error) { + fileInfo, err := fd.Stat() + if err != nil { + // It's OK to ignore fd.Close() errs in this function because we have only read + // from the file. + _ = fd.Close() + return nil, y.Wrap(err) + } + + filename := fileInfo.Name() + id, ok := ParseFileID(filename) + if !ok { + _ = fd.Close() + return nil, errors.Errorf("Invalid filename: %s", filename) + } + t := &Table{ + fd: fd, + ref: 1, // Caller is given one reference. + id: id, + loadingMode: mode, + } + + t.tableSize = int(fileInfo.Size()) + + // We first load to RAM, so we can read the index and do checksum. + if err := t.loadToRAM(); err != nil { + return nil, err + } + // Enforce checksum before we read index. Otherwise, if the file was + // truncated, we'd end up with panics in readIndex. + if len(cksum) > 0 && !bytes.Equal(t.Checksum, cksum) { + return nil, fmt.Errorf( + "CHECKSUM_MISMATCH: Table checksum does not match checksum in MANIFEST."+ + " NOT including table %s. This would lead to missing data."+ + "\n sha256 %x Expected\n sha256 %x Found\n", filename, cksum, t.Checksum) + } + if err := t.readIndex(); err != nil { + return nil, y.Wrap(err) + } + + it := t.NewIterator(false) + defer it.Close() + it.Rewind() + if it.Valid() { + t.smallest = it.Key() + } + + it2 := t.NewIterator(true) + defer it2.Close() + it2.Rewind() + if it2.Valid() { + t.biggest = it2.Key() + } + + switch mode { + case options.LoadToRAM: + // No need to do anything. t.mmap is already filled. + case options.MemoryMap: + t.mmap, err = y.Mmap(fd, false, fileInfo.Size()) + if err != nil { + _ = fd.Close() + return nil, y.Wrapf(err, "Unable to map file") + } + case options.FileIO: + t.mmap = nil + default: + panic(fmt.Sprintf("Invalid loading mode: %v", mode)) + } + return t, nil +} + +// Close closes the open table. (Releases resources back to the OS.) 
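+//
+// Unlike DecrRef, Close only unmaps the table (if memory-mapped) and closes
+// its file descriptor; it never truncates or deletes the underlying file.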
+func (t *Table) Close() error { + if t.loadingMode == options.MemoryMap { + y.Munmap(t.mmap) + } + + return t.fd.Close() +} + +func (t *Table) read(off int, sz int) ([]byte, error) { + if len(t.mmap) > 0 { + if len(t.mmap[off:]) < sz { + return nil, y.ErrEOF + } + return t.mmap[off : off+sz], nil + } + + res := make([]byte, sz) + nbr, err := t.fd.ReadAt(res, int64(off)) + y.NumReads.Add(1) + y.NumBytesRead.Add(int64(nbr)) + return res, err +} + +func (t *Table) readNoFail(off int, sz int) []byte { + res, err := t.read(off, sz) + y.Check(err) + return res +} + +func (t *Table) readIndex() error { + if len(t.mmap) != t.tableSize { + panic("Table size does not match the read bytes") + } + readPos := t.tableSize + + // Read bloom filter. + readPos -= 4 + buf := t.readNoFail(readPos, 4) + bloomLen := int(binary.BigEndian.Uint32(buf)) + readPos -= bloomLen + data := t.readNoFail(readPos, bloomLen) + t.bf = *bbloom.JSONUnmarshal(data) + + readPos -= 4 + buf = t.readNoFail(readPos, 4) + restartsLen := int(binary.BigEndian.Uint32(buf)) + + readPos -= 4 * restartsLen + buf = t.readNoFail(readPos, 4*restartsLen) + + offsets := make([]int, restartsLen) + for i := 0; i < restartsLen; i++ { + offsets[i] = int(binary.BigEndian.Uint32(buf[:4])) + buf = buf[4:] + } + + // The last offset stores the end of the last block. + for i := 0; i < len(offsets); i++ { + var o int + if i == 0 { + o = 0 + } else { + o = offsets[i-1] + } + + ko := keyOffset{ + offset: o, + len: offsets[i] - o, + } + t.blockIndex = append(t.blockIndex, ko) + } + + // Execute this index read serially, because we already have table data in memory. + var h header + for idx := range t.blockIndex { + ko := &t.blockIndex[idx] + + hbuf := t.readNoFail(ko.offset, h.Size()) + h.Decode(hbuf) + y.AssertTrue(h.plen == 0) + + key := t.readNoFail(ko.offset+len(hbuf), int(h.klen)) + ko.key = append([]byte{}, key...) + } + + return nil +} + +func (t *Table) block(idx int) (block, error) { + y.AssertTruef(idx >= 0, "idx=%d", idx) + if idx >= len(t.blockIndex) { + return block{}, errors.New("block out of index") + } + + ko := t.blockIndex[idx] + blk := block{ + offset: ko.offset, + } + var err error + blk.data, err = t.read(blk.offset, ko.len) + return blk, err +} + +// Size is its file size in bytes +func (t *Table) Size() int64 { return int64(t.tableSize) } + +// Smallest is its smallest key, or nil if there are none +func (t *Table) Smallest() []byte { return t.smallest } + +// Biggest is its biggest key, or nil if there are none +func (t *Table) Biggest() []byte { return t.biggest } + +// Filename is NOT the file name. Just kidding, it is. +func (t *Table) Filename() string { return t.fd.Name() } + +// ID is the table's ID number (used to make the file name). +func (t *Table) ID() uint64 { return t.id } + +// DoesNotHave returns true if (but not "only if") the table does not have the key. It does a +// bloom filter lookup. +func (t *Table) DoesNotHave(key []byte) bool { return !t.bf.Has(key) } + +// ParseFileID reads the file id out of a filename. 
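+//
+// For example, ParseFileID("000123.sst") returns (123, true); a name without
+// the ".sst" suffix or with a non-numeric prefix returns (0, false).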
+func ParseFileID(name string) (uint64, bool) { + name = path.Base(name) + if !strings.HasSuffix(name, fileSuffix) { + return 0, false + } + // suffix := name[len(fileSuffix):] + name = strings.TrimSuffix(name, fileSuffix) + id, err := strconv.Atoi(name) + if err != nil { + return 0, false + } + y.AssertTrue(id >= 0) + return uint64(id), true +} + +// IDToFilename does the inverse of ParseFileID +func IDToFilename(id uint64) string { + return fmt.Sprintf("%06d", id) + fileSuffix +} + +// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table +// filepath. +func NewFilename(id uint64, dir string) string { + return filepath.Join(dir, IDToFilename(id)) +} + +func (t *Table) loadToRAM() error { + if _, err := t.fd.Seek(0, io.SeekStart); err != nil { + return err + } + t.mmap = make([]byte, t.tableSize) + sum := sha256.New() + tee := io.TeeReader(t.fd, sum) + read, err := tee.Read(t.mmap) + if err != nil || read != t.tableSize { + return y.Wrapf(err, "Unable to load file in memory. Table file: %s", t.Filename()) + } + t.Checksum = sum.Sum(nil) + y.NumReads.Add(1) + y.NumBytesRead.Add(int64(read)) + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/table/table_test.go b/vendor/github.com/dgraph-io/badger/table/table_test.go new file mode 100644 index 00000000..d16e4083 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/table/table_test.go @@ -0,0 +1,729 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package table + +import ( + "fmt" + "math/rand" + "os" + "sort" + "testing" + + "github.com/stretchr/testify/require" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +func key(prefix string, i int) string { + return prefix + fmt.Sprintf("%04d", i) +} + +func buildTestTable(t *testing.T, prefix string, n int) *os.File { + y.AssertTrue(n <= 10000) + keyValues := make([][]string, n) + for i := 0; i < n; i++ { + k := key(prefix, i) + v := fmt.Sprintf("%d", i) + keyValues[i] = []string{k, v} + } + return buildTable(t, keyValues) +} + +// keyValues is n by 2 where n is number of pairs. +func buildTable(t *testing.T, keyValues [][]string) *os.File { + b := NewTableBuilder() + defer b.Close() + // TODO: Add test for file garbage collection here. No files should be left after the tests here. 
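+	// The helper writes the finished table into a temporary .sst file and then
+	// reopens it, so the caller gets a fresh handle at the start of the file.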
+ + filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63()) + f, err := y.OpenSyncedFile(filename, true) + if t != nil { + require.NoError(t, err) + } else { + y.Check(err) + } + + sort.Slice(keyValues, func(i, j int) bool { + return keyValues[i][0] < keyValues[j][0] + }) + for _, kv := range keyValues { + y.AssertTrue(len(kv) == 2) + err := b.Add(y.KeyWithTs([]byte(kv[0]), 0), y.ValueStruct{Value: []byte(kv[1]), Meta: 'A', UserMeta: 0}) + if t != nil { + require.NoError(t, err) + } else { + y.Check(err) + } + } + f.Write(b.Finish()) + f.Close() + f, _ = y.OpenSyncedFile(filename, true) + return f +} + +func TestTableIterator(t *testing.T) { + for _, n := range []int{99, 100, 101} { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + f := buildTestTable(t, "key", n) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + it := table.NewIterator(false) + defer it.Close() + count := 0 + for it.Rewind(); it.Valid(); it.Next() { + v := it.Value() + k := y.KeyWithTs([]byte(key("key", count)), 0) + require.EqualValues(t, k, it.Key()) + require.EqualValues(t, fmt.Sprintf("%d", count), string(v.Value)) + count++ + } + require.Equal(t, count, n) + }) + } +} + +func TestSeekToFirst(t *testing.T) { + for _, n := range []int{99, 100, 101, 199, 200, 250, 9999, 10000} { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + f := buildTestTable(t, "key", n) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + it := table.NewIterator(false) + defer it.Close() + it.seekToFirst() + require.True(t, it.Valid()) + v := it.Value() + require.EqualValues(t, "0", string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + }) + } +} + +func TestSeekToLast(t *testing.T) { + for _, n := range []int{99, 100, 101, 199, 200, 250, 9999, 10000} { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + f := buildTestTable(t, "key", n) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + it := table.NewIterator(false) + defer it.Close() + it.seekToLast() + require.True(t, it.Valid()) + v := it.Value() + require.EqualValues(t, fmt.Sprintf("%d", n-1), string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + it.prev() + require.True(t, it.Valid()) + v = it.Value() + require.EqualValues(t, fmt.Sprintf("%d", n-2), string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + }) + } +} + +func TestSeek(t *testing.T) { + f := buildTestTable(t, "k", 10000) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + + it := table.NewIterator(false) + defer it.Close() + + var data = []struct { + in string + valid bool + out string + }{ + {"abc", true, "k0000"}, + {"k0100", true, "k0100"}, + {"k0100b", true, "k0101"}, // Test case where we jump to next block. 
+ {"k1234", true, "k1234"}, + {"k1234b", true, "k1235"}, + {"k9999", true, "k9999"}, + {"z", false, ""}, + } + + for _, tt := range data { + it.seek(y.KeyWithTs([]byte(tt.in), 0)) + if !tt.valid { + require.False(t, it.Valid()) + continue + } + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, tt.out, string(y.ParseKey(k))) + } +} + +func TestSeekForPrev(t *testing.T) { + f := buildTestTable(t, "k", 10000) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + + it := table.NewIterator(false) + defer it.Close() + + var data = []struct { + in string + valid bool + out string + }{ + {"abc", false, ""}, + {"k0100", true, "k0100"}, + {"k0100b", true, "k0100"}, // Test case where we jump to next block. + {"k1234", true, "k1234"}, + {"k1234b", true, "k1234"}, + {"k9999", true, "k9999"}, + {"z", true, "k9999"}, + } + + for _, tt := range data { + it.seekForPrev(y.KeyWithTs([]byte(tt.in), 0)) + if !tt.valid { + require.False(t, it.Valid()) + continue + } + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, tt.out, string(y.ParseKey(k))) + } +} + +func TestIterateFromStart(t *testing.T) { + // Vary the number of elements added. + for _, n := range []int{99, 100, 101, 199, 200, 250, 9999, 10000} { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + f := buildTestTable(t, "key", n) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + ti := table.NewIterator(false) + defer ti.Close() + ti.reset() + ti.seekToFirst() + require.True(t, ti.Valid()) + // No need to do a Next. + // ti.Seek brings us to the first key >= "". Essentially a SeekToFirst. + var count int + for ; ti.Valid(); ti.next() { + v := ti.Value() + require.EqualValues(t, fmt.Sprintf("%d", count), string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + count++ + } + require.EqualValues(t, n, count) + }) + } +} + +func TestIterateFromEnd(t *testing.T) { + // Vary the number of elements added. + for _, n := range []int{99, 100, 101, 199, 200, 250, 9999, 10000} { + t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { + f := buildTestTable(t, "key", n) + table, err := OpenTable(f, options.FileIO, nil) + require.NoError(t, err) + defer table.DecrRef() + ti := table.NewIterator(false) + defer ti.Close() + ti.reset() + ti.seek(y.KeyWithTs([]byte("zzzzzz"), 0)) // Seek to end, an invalid element. + require.False(t, ti.Valid()) + for i := n - 1; i >= 0; i-- { + ti.prev() + require.True(t, ti.Valid()) + v := ti.Value() + require.EqualValues(t, fmt.Sprintf("%d", i), string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + } + ti.prev() + require.False(t, ti.Valid()) + }) + } +} + +func TestTable(t *testing.T) { + f := buildTestTable(t, "key", 10000) + table, err := OpenTable(f, options.FileIO, nil) + require.NoError(t, err) + defer table.DecrRef() + ti := table.NewIterator(false) + defer ti.Close() + kid := 1010 + seek := y.KeyWithTs([]byte(key("key", kid)), 0) + for ti.seek(seek); ti.Valid(); ti.next() { + k := ti.Key() + require.EqualValues(t, string(y.ParseKey(k)), key("key", kid)) + kid++ + } + if kid != 10000 { + t.Errorf("Expected kid: 10000. 
Got: %v", kid) + } + + ti.seek(y.KeyWithTs([]byte(key("key", 99999)), 0)) + require.False(t, ti.Valid()) + + ti.seek(y.KeyWithTs([]byte(key("key", -1)), 0)) + require.True(t, ti.Valid()) + k := ti.Key() + require.EqualValues(t, string(y.ParseKey(k)), key("key", 0)) +} + +func TestIterateBackAndForth(t *testing.T) { + f := buildTestTable(t, "key", 10000) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + + seek := y.KeyWithTs([]byte(key("key", 1010)), 0) + it := table.NewIterator(false) + defer it.Close() + it.seek(seek) + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, seek, k) + + it.prev() + it.prev() + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, key("key", 1008), string(y.ParseKey(k))) + + it.next() + it.next() + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, key("key", 1010), y.ParseKey(k)) + + it.seek(y.KeyWithTs([]byte(key("key", 2000)), 0)) + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, key("key", 2000), y.ParseKey(k)) + + it.prev() + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, key("key", 1999), y.ParseKey(k)) + + it.seekToFirst() + k = it.Key() + require.EqualValues(t, key("key", 0), y.ParseKey(k)) +} + +func TestUniIterator(t *testing.T) { + f := buildTestTable(t, "key", 10000) + table, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer table.DecrRef() + { + it := table.NewIterator(false) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + v := it.Value() + require.EqualValues(t, fmt.Sprintf("%d", count), string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + count++ + } + require.EqualValues(t, 10000, count) + } + { + it := table.NewIterator(true) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + v := it.Value() + require.EqualValues(t, fmt.Sprintf("%d", 10000-1-count), string(v.Value)) + require.EqualValues(t, 'A', v.Meta) + count++ + } + require.EqualValues(t, 10000, count) + } +} + +// Try having only one table. 
+func TestConcatIteratorOneTable(t *testing.T) { + f := buildTable(t, [][]string{ + {"k1", "a1"}, + {"k2", "a2"}, + }) + + tbl, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer tbl.DecrRef() + + it := NewConcatIterator([]*Table{tbl}, false) + defer it.Close() + + it.Rewind() + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, "k1", string(y.ParseKey(k))) + vs := it.Value() + require.EqualValues(t, "a1", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) +} + +func TestConcatIterator(t *testing.T) { + f := buildTestTable(t, "keya", 10000) + f2 := buildTestTable(t, "keyb", 10000) + f3 := buildTestTable(t, "keyc", 10000) + tbl, err := OpenTable(f, options.MemoryMap, nil) + require.NoError(t, err) + defer tbl.DecrRef() + tbl2, err := OpenTable(f2, options.LoadToRAM, nil) + require.NoError(t, err) + defer tbl2.DecrRef() + tbl3, err := OpenTable(f3, options.LoadToRAM, nil) + require.NoError(t, err) + defer tbl3.DecrRef() + + { + it := NewConcatIterator([]*Table{tbl, tbl2, tbl3}, false) + defer it.Close() + it.Rewind() + require.True(t, it.Valid()) + var count int + for ; it.Valid(); it.Next() { + vs := it.Value() + require.EqualValues(t, fmt.Sprintf("%d", count%10000), string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + count++ + } + require.EqualValues(t, 30000, count) + + it.Seek(y.KeyWithTs([]byte("a"), 0)) + require.EqualValues(t, "keya0000", string(y.ParseKey(it.Key()))) + vs := it.Value() + require.EqualValues(t, "0", string(vs.Value)) + + it.Seek(y.KeyWithTs([]byte("keyb"), 0)) + require.EqualValues(t, "keyb0000", string(y.ParseKey(it.Key()))) + vs = it.Value() + require.EqualValues(t, "0", string(vs.Value)) + + it.Seek(y.KeyWithTs([]byte("keyb9999b"), 0)) + require.EqualValues(t, "keyc0000", string(y.ParseKey(it.Key()))) + vs = it.Value() + require.EqualValues(t, "0", string(vs.Value)) + + it.Seek(y.KeyWithTs([]byte("keyd"), 0)) + require.False(t, it.Valid()) + } + { + it := NewConcatIterator([]*Table{tbl, tbl2, tbl3}, true) + defer it.Close() + it.Rewind() + require.True(t, it.Valid()) + var count int + for ; it.Valid(); it.Next() { + vs := it.Value() + require.EqualValues(t, fmt.Sprintf("%d", 10000-(count%10000)-1), string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + count++ + } + require.EqualValues(t, 30000, count) + + it.Seek(y.KeyWithTs([]byte("a"), 0)) + require.False(t, it.Valid()) + + it.Seek(y.KeyWithTs([]byte("keyb"), 0)) + require.EqualValues(t, "keya9999", string(y.ParseKey(it.Key()))) + vs := it.Value() + require.EqualValues(t, "9999", string(vs.Value)) + + it.Seek(y.KeyWithTs([]byte("keyb9999b"), 0)) + require.EqualValues(t, "keyb9999", string(y.ParseKey(it.Key()))) + vs = it.Value() + require.EqualValues(t, "9999", string(vs.Value)) + + it.Seek(y.KeyWithTs([]byte("keyd"), 0)) + require.EqualValues(t, "keyc9999", string(y.ParseKey(it.Key()))) + vs = it.Value() + require.EqualValues(t, "9999", string(vs.Value)) + } +} + +func TestMergingIterator(t *testing.T) { + f1 := buildTable(t, [][]string{ + {"k1", "a1"}, + {"k2", "a2"}, + }) + f2 := buildTable(t, [][]string{ + {"k1", "b1"}, + {"k2", "b2"}, + }) + tbl1, err := OpenTable(f1, options.LoadToRAM, nil) + require.NoError(t, err) + defer tbl1.DecrRef() + tbl2, err := OpenTable(f2, options.LoadToRAM, nil) + require.NoError(t, err) + defer tbl2.DecrRef() + it1 := tbl1.NewIterator(false) + it2 := NewConcatIterator([]*Table{tbl2}, false) + it := y.NewMergeIterator([]y.Iterator{it1, it2}, false) + defer it.Close() + + it.Rewind() + require.True(t, it.Valid()) + k 
:= it.Key() + require.EqualValues(t, "k1", string(y.ParseKey(k))) + vs := it.Value() + require.EqualValues(t, "a1", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, "k2", string(y.ParseKey(k))) + vs = it.Value() + require.EqualValues(t, "a2", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.False(t, it.Valid()) +} + +func TestMergingIteratorReversed(t *testing.T) { + f1 := buildTable(t, [][]string{ + {"k1", "a1"}, + {"k2", "a2"}, + }) + f2 := buildTable(t, [][]string{ + {"k1", "b1"}, + {"k2", "b2"}, + }) + tbl1, err := OpenTable(f1, options.LoadToRAM, nil) + require.NoError(t, err) + defer tbl1.DecrRef() + tbl2, err := OpenTable(f2, options.LoadToRAM, nil) + require.NoError(t, err) + defer tbl2.DecrRef() + it1 := tbl1.NewIterator(true) + it2 := NewConcatIterator([]*Table{tbl2}, true) + it := y.NewMergeIterator([]y.Iterator{it1, it2}, true) + defer it.Close() + + it.Rewind() + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, "k2", string(y.ParseKey(k))) + vs := it.Value() + require.EqualValues(t, "a2", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, "k1", string(y.ParseKey(k))) + vs = it.Value() + require.EqualValues(t, "a1", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.False(t, it.Valid()) +} + +// Take only the first iterator. +func TestMergingIteratorTakeOne(t *testing.T) { + f1 := buildTable(t, [][]string{ + {"k1", "a1"}, + {"k2", "a2"}, + }) + f2 := buildTable(t, [][]string{}) + + t1, err := OpenTable(f1, options.LoadToRAM, nil) + require.NoError(t, err) + defer t1.DecrRef() + t2, err := OpenTable(f2, options.LoadToRAM, nil) + require.NoError(t, err) + defer t2.DecrRef() + + it1 := NewConcatIterator([]*Table{t1}, false) + it2 := NewConcatIterator([]*Table{t2}, false) + it := y.NewMergeIterator([]y.Iterator{it1, it2}, false) + defer it.Close() + + it.Rewind() + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, "k1", string(y.ParseKey(k))) + vs := it.Value() + require.EqualValues(t, "a1", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, "k2", string(y.ParseKey(k))) + vs = it.Value() + require.EqualValues(t, "a2", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.False(t, it.Valid()) +} + +// Take only the second iterator. 
+func TestMergingIteratorTakeTwo(t *testing.T) { + f1 := buildTable(t, [][]string{}) + f2 := buildTable(t, [][]string{ + {"k1", "a1"}, + {"k2", "a2"}, + }) + + t1, err := OpenTable(f1, options.LoadToRAM, nil) + require.NoError(t, err) + defer t1.DecrRef() + t2, err := OpenTable(f2, options.LoadToRAM, nil) + require.NoError(t, err) + defer t2.DecrRef() + + it1 := NewConcatIterator([]*Table{t1}, false) + it2 := NewConcatIterator([]*Table{t2}, false) + it := y.NewMergeIterator([]y.Iterator{it1, it2}, false) + defer it.Close() + + it.Rewind() + require.True(t, it.Valid()) + k := it.Key() + require.EqualValues(t, "k1", string(y.ParseKey(k))) + vs := it.Value() + require.EqualValues(t, "a1", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.True(t, it.Valid()) + k = it.Key() + require.EqualValues(t, "k2", string(y.ParseKey(k))) + vs = it.Value() + require.EqualValues(t, "a2", string(vs.Value)) + require.EqualValues(t, 'A', vs.Meta) + it.Next() + + require.False(t, it.Valid()) +} + +func BenchmarkRead(b *testing.B) { + n := 5 << 20 + builder := NewTableBuilder() + filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63()) + f, err := y.OpenSyncedFile(filename, true) + y.Check(err) + for i := 0; i < n; i++ { + k := fmt.Sprintf("%016x", i) + v := fmt.Sprintf("%d", i) + y.Check(builder.Add([]byte(k), y.ValueStruct{Value: []byte(v), Meta: 123, UserMeta: 0})) + } + + f.Write(builder.Finish()) + tbl, err := OpenTable(f, options.MemoryMap, nil) + y.Check(err) + defer tbl.DecrRef() + + // y.Printf("Size of table: %d\n", tbl.Size()) + b.ResetTimer() + // Iterate b.N times over the entire table. + for i := 0; i < b.N; i++ { + func() { + it := tbl.NewIterator(false) + defer it.Close() + for it.seekToFirst(); it.Valid(); it.next() { + } + }() + } +} + +func BenchmarkReadAndBuild(b *testing.B) { + n := 5 << 20 + builder := NewTableBuilder() + filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63()) + f, err := y.OpenSyncedFile(filename, true) + y.Check(err) + for i := 0; i < n; i++ { + k := fmt.Sprintf("%016x", i) + v := fmt.Sprintf("%d", i) + y.Check(builder.Add([]byte(k), y.ValueStruct{Value: []byte(v), Meta: 123, UserMeta: 0})) + } + + f.Write(builder.Finish()) + tbl, err := OpenTable(f, options.MemoryMap, nil) + y.Check(err) + defer tbl.DecrRef() + + // y.Printf("Size of table: %d\n", tbl.Size()) + b.ResetTimer() + // Iterate b.N times over the entire table. + for i := 0; i < b.N; i++ { + func() { + newBuilder := NewTableBuilder() + it := tbl.NewIterator(false) + defer it.Close() + for it.seekToFirst(); it.Valid(); it.next() { + vs := it.Value() + newBuilder.Add(it.Key(), vs) + } + newBuilder.Finish() + }() + } +} + +func BenchmarkReadMerged(b *testing.B) { + n := 5 << 20 + m := 5 // Number of tables. + y.AssertTrue((n % m) == 0) + tableSize := n / m + var tables []*Table + for i := 0; i < m; i++ { + filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63()) + builder := NewTableBuilder() + f, err := y.OpenSyncedFile(filename, true) + y.Check(err) + for j := 0; j < tableSize; j++ { + id := j*m + i // Arrays are interleaved. 
+ // id := i*tableSize+j (not interleaved) + k := fmt.Sprintf("%016x", id) + v := fmt.Sprintf("%d", id) + y.Check(builder.Add([]byte(k), y.ValueStruct{Value: []byte(v), Meta: 123, UserMeta: 0})) + } + f.Write(builder.Finish()) + tbl, err := OpenTable(f, options.MemoryMap, nil) + y.Check(err) + tables = append(tables, tbl) + defer tbl.DecrRef() + } + + b.ResetTimer() + // Iterate b.N times over the entire table. + for i := 0; i < b.N; i++ { + func() { + var iters []y.Iterator + for _, tbl := range tables { + iters = append(iters, tbl.NewIterator(false)) + } + it := y.NewMergeIterator(iters, false) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + } + }() + } +} diff --git a/vendor/github.com/dgraph-io/badger/test.sh b/vendor/github.com/dgraph-io/badger/test.sh new file mode 100644 index 00000000..2216ecbd --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/test.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +# Ensure that we can compile the binary. +pushd badger +go build -v . +popd + +# Run the memory intensive tests first. +go test -v --manual=true -run='TestBigKeyValuePairs$' +go test -v --manual=true -run='TestPushValueLogLimit' + +# Run the special Truncate test. +rm -R p || true +go test -v --manual=true -run='TestTruncateVlogNoClose$' . +truncate --size=4096 p/000000.vlog +go test -v --manual=true -run='TestTruncateVlogNoClose2$' . +go test -v --manual=true -run='TestTruncateVlogNoClose3$' . +rm -R p + +# Then the normal tests. +go test -v --vlog_mmap=true -race ./... +go test -v --vlog_mmap=false -race ./... diff --git a/vendor/github.com/dgraph-io/badger/txn.go b/vendor/github.com/dgraph-io/badger/txn.go new file mode 100644 index 00000000..1164108a --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/txn.go @@ -0,0 +1,753 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bytes" + "context" + "encoding/hex" + "math" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + farm "gx/ipfs/QmRFFHk2jw9tgjxv12bCuuTnSbVXxEvYQkuNCLMEv9eUwP/go-farm" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +type oracle struct { + // A 64-bit integer must be at the top for memory alignment. See issue #311. + refCount int64 + isManaged bool // Does not change value, so no locking required. + + sync.Mutex // For nextTxnTs and commits. + // writeChLock lock is for ensuring that transactions go to the write + // channel in the same order as their commit timestamps. + writeChLock sync.Mutex + nextTxnTs uint64 + + // Used to block NewTransaction, so all previous commits are visible to a new read. + txnMark *y.WaterMark + + // Either of these is used to determine which versions can be permanently + // discarded during compaction. + discardTs uint64 // Used by ManagedDB. + readMark *y.WaterMark // Used by DB. + + // commits stores a key fingerprint and latest commit counter for it. 
+ // refCount is used to clear out commits map to avoid a memory blowup. + commits map[uint64]uint64 + + // closer is used to stop watermarks. + closer *y.Closer +} + +func newOracle(opt Options) *oracle { + orc := &oracle{ + isManaged: opt.managedTxns, + commits: make(map[uint64]uint64), + // We're not initializing nextTxnTs and readOnlyTs. It would be done after replay in Open. + // + // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here. + // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG. + readMark: &y.WaterMark{Name: "badger.PendingReads"}, + txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"}, + closer: y.NewCloser(2), + } + orc.readMark.Init(orc.closer) + orc.txnMark.Init(orc.closer) + return orc +} + +func (o *oracle) Stop() { + o.closer.SignalAndWait() +} + +func (o *oracle) addRef() { + atomic.AddInt64(&o.refCount, 1) +} + +func (o *oracle) decrRef() { + if atomic.AddInt64(&o.refCount, -1) != 0 { + return + } + + // Clear out commits maps to release memory. + o.Lock() + defer o.Unlock() + // Avoids the race where something new is added to commitsMap + // after we check refCount and before we take Lock. + if atomic.LoadInt64(&o.refCount) != 0 { + return + } + if len(o.commits) >= 1000 { // If the map is still small, let it slide. + o.commits = make(map[uint64]uint64) + } +} + +func (o *oracle) readTs() uint64 { + if o.isManaged { + panic("ReadTs should not be retrieved for managed DB") + } + + var readTs uint64 + o.Lock() + readTs = o.nextTxnTs - 1 + o.readMark.Begin(readTs) + o.Unlock() + + // Wait for all txns which have no conflicts, have been assigned a commit + // timestamp and are going through the write to value log and LSM tree + // process. Not waiting here could mean that some txns which have been + // committed would not be read. + y.Check(o.txnMark.WaitForMark(context.Background(), readTs)) + return readTs +} + +func (o *oracle) nextTs() uint64 { + o.Lock() + defer o.Unlock() + return o.nextTxnTs +} + +// Any deleted or invalid versions at or below ts would be discarded during +// compaction to reclaim disk space in LSM tree and thence value log. +func (o *oracle) setDiscardTs(ts uint64) { + o.Lock() + defer o.Unlock() + o.discardTs = ts +} + +func (o *oracle) discardAtOrBelow() uint64 { + if o.isManaged { + o.Lock() + defer o.Unlock() + return o.discardTs + } + return o.readMark.DoneUntil() +} + +// hasConflict must be called while having a lock. +func (o *oracle) hasConflict(txn *Txn) bool { + if len(txn.reads) == 0 { + return false + } + for _, ro := range txn.reads { + // A commit at the read timestamp is expected. + // But, any commit after the read timestamp should cause a conflict. + if ts, has := o.commits[ro]; has && ts > txn.readTs { + return true + } + } + return false +} + +func (o *oracle) newCommitTs(txn *Txn) uint64 { + o.Lock() + defer o.Unlock() + + if o.hasConflict(txn) { + return 0 + } + + var ts uint64 + if !o.isManaged { + // This is the general case, when user doesn't specify the read and commit ts. + ts = o.nextTxnTs + o.nextTxnTs++ + o.txnMark.Begin(ts) + + } else { + // If commitTs is set, use it instead. + ts = txn.commitTs + } + + for _, w := range txn.writes { + o.commits[w] = ts // Update the commitTs. + } + return ts +} + +func (o *oracle) doneCommit(cts uint64) { + if o.isManaged { + // No need to update anything. + return + } + o.txnMark.Done(cts) +} + +// Txn represents a Badger transaction. 
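+//
+// A Txn is obtained from DB.NewTransaction (or managed for you by DB.View and
+// DB.Update) and must always be finished with either Commit or Discard.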
+type Txn struct { + readTs uint64 + commitTs uint64 + + update bool // update is used to conditionally keep track of reads. + reads []uint64 // contains fingerprints of keys read. + writes []uint64 // contains fingerprints of keys written. + + pendingWrites map[string]*Entry // cache stores any writes done by txn. + + db *DB + discarded bool + + size int64 + count int64 + numIterators int32 +} + +type pendingWritesIterator struct { + entries []*Entry + nextIdx int + readTs uint64 + reversed bool +} + +func (pi *pendingWritesIterator) Next() { + pi.nextIdx++ +} + +func (pi *pendingWritesIterator) Rewind() { + pi.nextIdx = 0 +} + +func (pi *pendingWritesIterator) Seek(key []byte) { + key = y.ParseKey(key) + pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool { + cmp := bytes.Compare(pi.entries[idx].Key, key) + if !pi.reversed { + return cmp >= 0 + } + return cmp <= 0 + }) +} + +func (pi *pendingWritesIterator) Key() []byte { + y.AssertTrue(pi.Valid()) + entry := pi.entries[pi.nextIdx] + return y.KeyWithTs(entry.Key, pi.readTs) +} + +func (pi *pendingWritesIterator) Value() y.ValueStruct { + y.AssertTrue(pi.Valid()) + entry := pi.entries[pi.nextIdx] + return y.ValueStruct{ + Value: entry.Value, + Meta: entry.meta, + UserMeta: entry.UserMeta, + ExpiresAt: entry.ExpiresAt, + Version: pi.readTs, + } +} + +func (pi *pendingWritesIterator) Valid() bool { + return pi.nextIdx < len(pi.entries) +} + +func (pi *pendingWritesIterator) Close() error { + return nil +} + +func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator { + if !txn.update || len(txn.pendingWrites) == 0 { + return nil + } + entries := make([]*Entry, 0, len(txn.pendingWrites)) + for _, e := range txn.pendingWrites { + entries = append(entries, e) + } + // Number of pending writes per transaction shouldn't be too big in general. + sort.Slice(entries, func(i, j int) bool { + cmp := bytes.Compare(entries[i].Key, entries[j].Key) + if !reversed { + return cmp < 0 + } + return cmp > 0 + }) + return &pendingWritesIterator{ + readTs: txn.readTs, + entries: entries, + reversed: reversed, + } +} + +func (txn *Txn) checkSize(e *Entry) error { + count := txn.count + 1 + // Extra bytes for version in key. + size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10 + if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize { + return ErrTxnTooBig + } + txn.count, txn.size = count, size + return nil +} + +// Set adds a key-value pair to the database. +// +// It will return ErrReadOnlyTxn if update flag was set to false when creating the +// transaction. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the transaction. +func (txn *Txn) Set(key, val []byte) error { + e := &Entry{ + Key: key, + Value: val, + } + return txn.SetEntry(e) +} + +// SetWithMeta adds a key-value pair to the database, along with a metadata +// byte. +// +// This byte is stored alongside the key, and can be used as an aid to +// interpret the value or store other contextual bits corresponding to the +// key-value pair. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the transaction. 
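+//
+// For example, txn.SetWithMeta([]byte("key"), []byte("value"), 0x01) stores
+// the pair together with the user metadata byte 0x01 (key, value and the
+// metadata byte here are purely illustrative).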
+func (txn *Txn) SetWithMeta(key, val []byte, meta byte) error { + e := &Entry{Key: key, Value: val, UserMeta: meta} + return txn.SetEntry(e) +} + +// SetWithDiscard acts like SetWithMeta, but adds a marker to discard earlier +// versions of the key. +// +// This method is only useful if you have set a higher limit for +// options.NumVersionsToKeep. The default setting is 1, in which case, this +// function doesn't add any more benefit than just calling the normal +// SetWithMeta (or Set) function. If however, you have a higher setting for +// NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this method +// to indicate that all the older versions can be discarded and removed during +// compactions. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the +// transaction. +func (txn *Txn) SetWithDiscard(key, val []byte, meta byte) error { + e := &Entry{ + Key: key, + Value: val, + UserMeta: meta, + meta: bitDiscardEarlierVersions, + } + return txn.SetEntry(e) +} + +// SetWithTTL adds a key-value pair to the database, along with a time-to-live +// (TTL) setting. A key stored with a TTL would automatically expire after the +// time has elapsed , and be eligible for garbage collection. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the +// transaction. +func (txn *Txn) SetWithTTL(key, val []byte, dur time.Duration) error { + expire := time.Now().Add(dur).Unix() + e := &Entry{Key: key, Value: val, ExpiresAt: uint64(expire)} + return txn.SetEntry(e) +} + +func exceedsSize(prefix string, max int64, key []byte) error { + return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s", + prefix, len(key), max, prefix, hex.Dump(key[:1<<10])) +} + +func (txn *Txn) modify(e *Entry) error { + const maxKeySize = 65000 + + switch { + case !txn.update: + return ErrReadOnlyTxn + case txn.discarded: + return ErrDiscardedTxn + case len(e.Key) == 0: + return ErrEmptyKey + case bytes.HasPrefix(e.Key, badgerPrefix): + return ErrInvalidKey + case len(e.Key) > maxKeySize: + // Key length can't be more than uint16, as determined by table::header. To + // keep things safe and allow badger move prefix and a timestamp suffix, let's + // cut it down to 65000, instead of using 65536. + return exceedsSize("Key", maxKeySize, e.Key) + case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize: + return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value) + } + + if err := txn.checkSize(e); err != nil { + return err + } + fp := farm.Fingerprint64(e.Key) // Avoid dealing with byte arrays. + txn.writes = append(txn.writes, fp) + txn.pendingWrites[string(e.Key)] = e + return nil +} + +// SetEntry takes an Entry struct and adds the key-value pair in the struct, +// along with other metadata to the database. +// +// The current transaction keeps a reference to the entry passed in argument. +// Users must not modify the entry until the end of the transaction. +func (txn *Txn) SetEntry(e *Entry) error { + return txn.modify(e) +} + +// Delete deletes a key. +// +// This is done by adding a delete marker for the key at commit timestamp. Any +// reads happening before this timestamp would be unaffected. Any reads after +// this commit would see the deletion. +// +// The current transaction keeps a reference to the key byte slice argument. +// Users must not modify the key until the end of the transaction. 
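+//
+// For example, txn.Delete([]byte("key")) records a delete marker for "key"
+// (an illustrative key), which reads at timestamps after the commit will see.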
+func (txn *Txn) Delete(key []byte) error { + e := &Entry{ + Key: key, + meta: bitDelete, + } + return txn.modify(e) +} + +// Get looks for key and returns corresponding Item. +// If key is not found, ErrKeyNotFound is returned. +func (txn *Txn) Get(key []byte) (item *Item, rerr error) { + if len(key) == 0 { + return nil, ErrEmptyKey + } else if txn.discarded { + return nil, ErrDiscardedTxn + } + + item = new(Item) + if txn.update { + if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) { + if isDeletedOrExpired(e.meta, e.ExpiresAt) { + return nil, ErrKeyNotFound + } + // Fulfill from cache. + item.meta = e.meta + item.val = e.Value + item.userMeta = e.UserMeta + item.key = key + item.status = prefetched + item.version = txn.readTs + item.expiresAt = e.ExpiresAt + // We probably don't need to set db on item here. + return item, nil + } + // Only track reads if this is update txn. No need to track read if txn serviced it + // internally. + txn.addReadKey(key) + } + + seek := y.KeyWithTs(key, txn.readTs) + vs, err := txn.db.get(seek) + if err != nil { + return nil, errors.Wrapf(err, "DB::Get key: %q", key) + } + if vs.Value == nil && vs.Meta == 0 { + return nil, ErrKeyNotFound + } + if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { + return nil, ErrKeyNotFound + } + + item.key = key + item.version = vs.Version + item.meta = vs.Meta + item.userMeta = vs.UserMeta + item.db = txn.db + item.vptr = vs.Value // TODO: Do we need to copy this over? + item.txn = txn + item.expiresAt = vs.ExpiresAt + return item, nil +} + +func (txn *Txn) addReadKey(key []byte) { + if txn.update { + fp := farm.Fingerprint64(key) + txn.reads = append(txn.reads, fp) + } +} + +// Discard discards a created transaction. This method is very important and must be called. Commit +// method calls this internally, however, calling this multiple times doesn't cause any issues. So, +// this can safely be called via a defer right when transaction is created. +// +// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned. +func (txn *Txn) Discard() { + if txn.discarded { // Avoid a re-run. + return + } + if atomic.LoadInt32(&txn.numIterators) > 0 { + panic("Unclosed iterator at time of Txn.Discard.") + } + txn.discarded = true + if !txn.db.orc.isManaged { + txn.db.orc.readMark.Done(txn.readTs) + } + if txn.update { + txn.db.orc.decrRef() + } +} + +func (txn *Txn) commitAndSend() (func() error, error) { + orc := txn.db.orc + // Ensure that the order in which we get the commit timestamp is the same as + // the order in which we push these updates to the write channel. So, we + // acquire a writeChLock before getting a commit timestamp, and only release + // it after pushing the entries to it. + orc.writeChLock.Lock() + defer orc.writeChLock.Unlock() + + commitTs := orc.newCommitTs(txn) + if commitTs == 0 { + return nil, ErrConflict + } + + // The following debug information is what led to determining the cause of + // bank txn violation bug, and it took a whole bunch of effort to narrow it + // down to here. So, keep this around for at least a couple of months. + // var b strings.Builder + // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ", + // txn.readTs, commitTs, txn.reads, txn.writes) + entries := make([]*Entry, 0, len(txn.pendingWrites)+1) + for _, e := range txn.pendingWrites { + // fmt.Fprintf(&b, "[%q : %q], ", e.Key, e.Value) + + // Suffix the keys with commit ts, so the key versions are sorted in + // descending order of commit timestamp. 
+ e.Key = y.KeyWithTs(e.Key, commitTs) + e.meta |= bitTxn + entries = append(entries, e) + } + // log.Printf("%s\n", b.String()) + e := &Entry{ + Key: y.KeyWithTs(txnKey, commitTs), + Value: []byte(strconv.FormatUint(commitTs, 10)), + meta: bitFinTxn, + } + entries = append(entries, e) + + req, err := txn.db.sendToWriteCh(entries) + if err != nil { + orc.doneCommit(commitTs) + return nil, err + } + ret := func() error { + err := req.Wait() + // Wait before marking commitTs as done. + // We can't defer doneCommit above, because it is being called from a + // callback here. + orc.doneCommit(commitTs) + return err + } + return ret, nil +} + +func (txn *Txn) commitPrecheck() { + if txn.commitTs == 0 && txn.db.opt.managedTxns { + panic("Commit cannot be called with managedDB=true. Use CommitAt.") + } + if txn.discarded { + panic("Trying to commit a discarded txn") + } +} + +// Commit commits the transaction, following these steps: +// +// 1. If there are no writes, return immediately. +// +// 2. Check if read rows were updated since txn started. If so, return ErrConflict. +// +// 3. If no conflict, generate a commit timestamp and update written rows' commit ts. +// +// 4. Batch up all writes, write them to value log and LSM tree. +// +// 5. If callback is provided, Badger will return immediately after checking +// for conflicts. Writes to the database will happen in the background. If +// there is a conflict, an error will be returned and the callback will not +// run. If there are no conflicts, the callback will be called in the +// background upon successful completion of writes or any error during write. +// +// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM +// tree won't be updated, so there's no need for any rollback. +func (txn *Txn) Commit() error { + txn.commitPrecheck() // Precheck before discarding txn. + defer txn.Discard() + + if len(txn.writes) == 0 { + return nil // Nothing to do. + } + + txnCb, err := txn.commitAndSend() + if err != nil { + return err + } + // If batchSet failed, LSM would not have been updated. So, no need to rollback anything. + + // TODO: What if some of the txns successfully make it to value log, but others fail. + // Nothing gets updated to LSM, until a restart happens. + return txnCb() +} + +type txnCb struct { + commit func() error + user func(error) + err error +} + +func runTxnCallback(cb *txnCb) { + switch { + case cb == nil: + panic("txn callback is nil") + case cb.user == nil: + panic("Must have caught a nil callback for txn.CommitWith") + case cb.err != nil: + cb.user(cb.err) + case cb.commit != nil: + err := cb.commit() + cb.user(err) + default: + cb.user(nil) + } +} + +// CommitWith acts like Commit, but takes a callback, which gets run via a +// goroutine to avoid blocking this function. The callback is guaranteed to run, +// so it is safe to increment sync.WaitGroup before calling CommitWith, and +// decrementing it in the callback; to block until all callbacks are run. +func (txn *Txn) CommitWith(cb func(error)) { + txn.commitPrecheck() // Precheck before discarding txn. + defer txn.Discard() + + if cb == nil { + panic("Nil callback provided to CommitWith") + } + + if len(txn.writes) == 0 { + // Do not run these callbacks from here, because the CommitWith and the + // callback might be acquiring the same locks. Instead run the callback + // from another goroutine. 
+ go runTxnCallback(&txnCb{user: cb, err: nil}) + return + } + + commitCb, err := txn.commitAndSend() + if err != nil { + go runTxnCallback(&txnCb{user: cb, err: err}) + return + } + + go runTxnCallback(&txnCb{user: cb, commit: commitCb}) +} + +// ReadTs returns the read timestamp of the transaction. +func (txn *Txn) ReadTs() uint64 { + return txn.readTs +} + +// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions, +// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking +// the keys read and at Commit time, ensuring that these read keys weren't concurrently modified by +// another transaction. +// +// For read-only transactions, set update to false. In this mode, we don't track the rows read for +// any changes. Thus, any long running iterations done in this mode wouldn't pay this overhead. +// +// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and +// should only be run serially. It doesn't matter if a transaction is created by one goroutine and +// passed down to other, as long as the Txn APIs are called serially. +// +// When you create a new transaction, it is absolutely essential to call +// Discard(). This should be done irrespective of what the update param is set +// to. Commit API internally runs Discard, but running it twice wouldn't cause +// any issues. +// +// txn := db.NewTransaction(false) +// defer txn.Discard() +// // Call various APIs. +func (db *DB) NewTransaction(update bool) *Txn { + return db.newTransaction(update, false) +} + +func (db *DB) newTransaction(update, isManaged bool) *Txn { + if db.opt.ReadOnly && update { + // DB is read-only, force read-only transaction. + update = false + } + + txn := &Txn{ + update: update, + db: db, + count: 1, // One extra entry for BitFin. + size: int64(len(txnKey) + 10), // Some buffer for the extra entry. + } + if update { + txn.pendingWrites = make(map[string]*Entry) + txn.db.orc.addRef() + } + // It is important that the oracle addRef happens BEFORE we retrieve a read + // timestamp. Otherwise, it is possible that the oracle commit map would + // become nil after we get the read timestamp. + // The sequence of events can be: + // 1. This txn gets a read timestamp. + // 2. Another txn working on the same keyset commits them, and decrements + // the reference to oracle. + // 3. Oracle ref reaches zero, resetting commit map. + // 4. This txn increments the oracle reference. + // 5. Now this txn would go on to commit the keyset, and no conflicts + // would be detected. + // See issue: https://github.com/dgraph-io/badger/issues/574 + if !isManaged { + txn.readTs = db.orc.readTs() + } + return txn +} + +// View executes a function creating and managing a read-only transaction for the user. Error +// returned by the function is relayed by the View method. +// If View is used with managed transactions, it would assume a read timestamp of MaxUint64. +func (db *DB) View(fn func(txn *Txn) error) error { + var txn *Txn + if db.opt.managedTxns { + txn = db.NewTransactionAt(math.MaxUint64, false) + } else { + txn = db.NewTransaction(false) + } + defer txn.Discard() + + return fn(txn) +} + +// Update executes a function, creating and managing a read-write transaction +// for the user. Error returned by the function is relayed by the Update method. +// Update cannot be used with managed transactions. 
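+//
+// An illustrative sketch of the two helpers together (assumes an open,
+// non-managed *DB named db):
+//
+//	err := db.Update(func(txn *Txn) error {
+//		return txn.Set([]byte("answer"), []byte("42"))
+//	})
+//	if err == nil {
+//		err = db.View(func(txn *Txn) error {
+//			item, err := txn.Get([]byte("answer"))
+//			if err != nil {
+//				return err
+//			}
+//			return item.Value(func(val []byte) error {
+//				fmt.Printf("answer=%s\n", val)
+//				return nil
+//			})
+//		})
+//	}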
+func (db *DB) Update(fn func(txn *Txn) error) error { + if db.opt.managedTxns { + panic("Update can only be used with managedDB=false.") + } + txn := db.NewTransaction(true) + defer txn.Discard() + + if err := fn(txn); err != nil { + return err + } + + return txn.Commit() +} diff --git a/vendor/github.com/dgraph-io/badger/txn_test.go b/vendor/github.com/dgraph-io/badger/txn_test.go new file mode 100644 index 00000000..557a5bdf --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/txn_test.go @@ -0,0 +1,845 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "strconv" + "sync" + "testing" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + + "github.com/stretchr/testify/require" +) + +func TestTxnSimple(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + + txn := db.NewTransaction(true) + + for i := 0; i < 10; i++ { + k := []byte(fmt.Sprintf("key=%d", i)) + v := []byte(fmt.Sprintf("val=%d", i)) + txn.Set(k, v) + } + + item, err := txn.Get([]byte("key=8")) + require.NoError(t, err) + + require.NoError(t, item.Value(func(val []byte) error { + require.Equal(t, []byte("val=8"), val) + return nil + })) + + require.Panics(t, func() { txn.CommitAt(100, nil) }) + require.NoError(t, txn.Commit()) + }) +} + +func TestTxnReadAfterWrite(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + var wg sync.WaitGroup + N := 100 + wg.Add(N) + for i := 0; i < N; i++ { + go func(i int) { + defer wg.Done() + key := []byte(fmt.Sprintf("key%d", i)) + err := db.Update(func(tx *Txn) error { + return tx.Set(key, key) + }) + require.NoError(t, err) + err = db.View(func(tx *Txn) error { + item, err := tx.Get(key) + require.NoError(t, err) + val, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, val, key) + return nil + }) + require.NoError(t, err) + }(i) + } + wg.Wait() + }) +} + +func TestTxnCommitAsync(t *testing.T) { + key := func(i int) []byte { + return []byte(fmt.Sprintf("key=%d", i)) + } + + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + txn := db.NewTransaction(true) + for i := 0; i < 40; i++ { + err := txn.Set(key(i), []byte(strconv.Itoa(100))) + require.NoError(t, err) + } + require.NoError(t, txn.Commit()) + txn.Discard() + + closer := y.NewCloser(1) + go func() { + defer closer.Done() + for { + select { + case <-closer.HasBeenClosed(): + return + default: + } + // Keep checking balance variant + txn := db.NewTransaction(false) + totalBalance := 0 + for i := 0; i < 40; i++ { + item, err := txn.Get(key(i)) + require.NoError(t, err) + val, err := item.ValueCopy(nil) + require.NoError(t, err) + bal, err := strconv.Atoi(string(val)) + require.NoError(t, err) + totalBalance += bal + } + require.Equal(t, totalBalance, 4000) + txn.Discard() + } + }() + + var wg sync.WaitGroup + wg.Add(100) + for i := 0; i < 
100; i++ { + go func() { + txn := db.NewTransaction(true) + delta := rand.Intn(100) + for i := 0; i < 20; i++ { + err := txn.Set(key(i), []byte(strconv.Itoa(100-delta))) + require.NoError(t, err) + } + for i := 20; i < 40; i++ { + err := txn.Set(key(i), []byte(strconv.Itoa(100+delta))) + require.NoError(t, err) + } + // We are only doing writes, so there won't be any conflicts. + txn.CommitWith(func(err error) {}) + txn.Discard() + wg.Done() + }() + } + wg.Wait() + closer.SignalAndWait() + time.Sleep(time.Millisecond * 10) // allow goroutine to complete. + }) +} + +func TestTxnVersions(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + k := []byte("key") + for i := 1; i < 10; i++ { + txn := db.NewTransaction(true) + + txn.Set(k, []byte(fmt.Sprintf("valversion=%d", i))) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(i), db.orc.readTs()) + } + + checkIterator := func(itr *Iterator, i int) { + defer itr.Close() + count := 0 + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + require.Equal(t, k, item.Key()) + + val, err := item.ValueCopy(nil) + require.NoError(t, err) + exp := fmt.Sprintf("valversion=%d", i) + require.Equal(t, exp, string(val), "i=%d", i) + count++ + } + require.Equal(t, 1, count, "i=%d", i) // Should only loop once. + } + + checkAllVersions := func(itr *Iterator, i int) { + var version uint64 + if itr.opt.Reverse { + version = 1 + } else { + version = uint64(i) + } + + count := 0 + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + require.Equal(t, k, item.Key()) + require.Equal(t, version, item.Version()) + + val, err := item.ValueCopy(nil) + require.NoError(t, err) + exp := fmt.Sprintf("valversion=%d", version) + require.Equal(t, exp, string(val), "v=%d", version) + count++ + + if itr.opt.Reverse { + version++ + } else { + version-- + } + } + require.Equal(t, i, count, "i=%d", i) // Should loop as many times as i. + } + + for i := 1; i < 10; i++ { + txn := db.NewTransaction(true) + txn.readTs = uint64(i) // Read version at i. + + item, err := txn.Get(k) + require.NoError(t, err) + + val, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, []byte(fmt.Sprintf("valversion=%d", i)), val, + "Expected versions to match up at i=%d", i) + + // Try retrieving the latest version forward and reverse. + itr := txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, i) + + opt := DefaultIteratorOptions + opt.Reverse = true + itr = txn.NewIterator(opt) + checkIterator(itr, i) + + // Now try retrieving all versions forward and reverse. + opt = DefaultIteratorOptions + opt.AllVersions = true + itr = txn.NewIterator(opt) + checkAllVersions(itr, i) + itr.Close() + + opt = DefaultIteratorOptions + opt.AllVersions = true + opt.Reverse = true + itr = txn.NewIterator(opt) + checkAllVersions(itr, i) + itr.Close() + + txn.Discard() + } + txn := db.NewTransaction(true) + defer txn.Discard() + item, err := txn.Get(k) + require.NoError(t, err) + + val, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, []byte("valversion=9"), val) + }) +} + +func TestTxnWriteSkew(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Accounts + ax := []byte("x") + ay := []byte("y") + + // Set balance to $100 in each account. 
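+		// Note: under plain snapshot isolation both transactions below could
+		// commit, leaving x+y = 0 instead of 100. Badger's SSI check compares the
+		// keys a transaction read against keys committed after its readTs, so the
+		// second Commit below is expected to fail with ErrConflict.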
+ txn := db.NewTransaction(true) + defer txn.Discard() + val := []byte(strconv.Itoa(100)) + txn.Set(ax, val) + txn.Set(ay, val) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(1), db.orc.readTs()) + + getBal := func(txn *Txn, key []byte) (bal int) { + item, err := txn.Get(key) + require.NoError(t, err) + + val, err := item.ValueCopy(nil) + require.NoError(t, err) + bal, err = strconv.Atoi(string(val)) + require.NoError(t, err) + return bal + } + + // Start two transactions, each would read both accounts and deduct from one account. + txn1 := db.NewTransaction(true) + + sum := getBal(txn1, ax) + sum += getBal(txn1, ay) + require.Equal(t, 200, sum) + txn1.Set(ax, []byte("0")) // Deduct 100 from ax. + + // Let's read this back. + sum = getBal(txn1, ax) + require.Equal(t, 0, sum) + sum += getBal(txn1, ay) + require.Equal(t, 100, sum) + // Don't commit yet. + + txn2 := db.NewTransaction(true) + + sum = getBal(txn2, ax) + sum += getBal(txn2, ay) + require.Equal(t, 200, sum) + txn2.Set(ay, []byte("0")) // Deduct 100 from ay. + + // Let's read this back. + sum = getBal(txn2, ax) + require.Equal(t, 100, sum) + sum += getBal(txn2, ay) + require.Equal(t, 100, sum) + + // Commit both now. + require.NoError(t, txn1.Commit()) + require.Error(t, txn2.Commit()) // This should fail. + + require.Equal(t, uint64(2), db.orc.readTs()) + }) +} + +// a3, a2, b4 (del), b3, c2, c1 +// Read at ts=4 -> a3, c2 +// Read at ts=4(Uncommitted) -> a3, b4 +// Read at ts=3 -> a3, b3, c2 +// Read at ts=2 -> a2, c2 +// Read at ts=1 -> c1 +func TestTxnIterationEdgeCase(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + ka := []byte("a") + kb := []byte("b") + kc := []byte("c") + + // c1 + txn := db.NewTransaction(true) + txn.Set(kc, []byte("c1")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(1), db.orc.readTs()) + + // a2, c2 + txn = db.NewTransaction(true) + txn.Set(ka, []byte("a2")) + txn.Set(kc, []byte("c2")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(2), db.orc.readTs()) + + // b3 + txn = db.NewTransaction(true) + txn.Set(ka, []byte("a3")) + txn.Set(kb, []byte("b3")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(3), db.orc.readTs()) + + // b4, c4(del) (Uncommitted) + txn4 := db.NewTransaction(true) + require.NoError(t, txn4.Set(kb, []byte("b4"))) + require.NoError(t, txn4.Delete(kc)) + require.Equal(t, uint64(3), db.orc.readTs()) + + // b4 (del) + txn = db.NewTransaction(true) + txn.Delete(kb) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(4), db.orc.readTs()) + + checkIterator := func(itr *Iterator, expected []string) { + defer itr.Close() + var i int + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + val, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, expected[i], string(val), "readts=%d", itr.readTs) + i++ + } + require.Equal(t, len(expected), i) + } + + txn = db.NewTransaction(true) + defer txn.Discard() + itr := txn.NewIterator(DefaultIteratorOptions) + itr5 := txn4.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"a3", "c2"}) + checkIterator(itr5, []string{"a3", "b4"}) + + rev := DefaultIteratorOptions + rev.Reverse = true + itr = txn.NewIterator(rev) + itr5 = txn4.NewIterator(rev) + checkIterator(itr, []string{"c2", "a3"}) + checkIterator(itr5, []string{"b4", "a3"}) + + txn.readTs = 3 + itr = txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"a3", "b3", "c2"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c2", "b3", "a3"}) + + 
txn.readTs = 2 + itr = txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"a2", "c2"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c2", "a2"}) + + txn.readTs = 1 + itr = txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"c1"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c1"}) + }) +} + +// a2, a3, b4 (del), b3, c2, c1 +// Read at ts=4 -> a3, c2 +// Read at ts=3 -> a3, b3, c2 +// Read at ts=2 -> a2, c2 +// Read at ts=1 -> c1 +func TestTxnIterationEdgeCase2(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + ka := []byte("a") + kb := []byte("aa") + kc := []byte("aaa") + + // c1 + txn := db.NewTransaction(true) + txn.Set(kc, []byte("c1")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(1), db.orc.readTs()) + + // a2, c2 + txn = db.NewTransaction(true) + txn.Set(ka, []byte("a2")) + txn.Set(kc, []byte("c2")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(2), db.orc.readTs()) + + // b3 + txn = db.NewTransaction(true) + txn.Set(ka, []byte("a3")) + txn.Set(kb, []byte("b3")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(3), db.orc.readTs()) + + // b4 (del) + txn = db.NewTransaction(true) + txn.Delete(kb) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(4), db.orc.readTs()) + + checkIterator := func(itr *Iterator, expected []string) { + defer itr.Close() + var i int + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + val, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, expected[i], string(val), "readts=%d", itr.readTs) + i++ + } + require.Equal(t, len(expected), i) + } + txn = db.NewTransaction(true) + defer txn.Discard() + rev := DefaultIteratorOptions + rev.Reverse = true + + itr := txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"a3", "c2"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c2", "a3"}) + + txn.readTs = 5 + itr = txn.NewIterator(DefaultIteratorOptions) + itr.Seek(ka) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), ka) + itr.Seek(kc) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Close() + + itr = txn.NewIterator(rev) + itr.Seek(ka) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), ka) + itr.Seek(kc) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Close() + + txn.readTs = 3 + itr = txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"a3", "b3", "c2"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c2", "b3", "a3"}) + + txn.readTs = 2 + itr = txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"a2", "c2"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c2", "a2"}) + + txn.readTs = 1 + itr = txn.NewIterator(DefaultIteratorOptions) + checkIterator(itr, []string{"c1"}) + itr = txn.NewIterator(rev) + checkIterator(itr, []string{"c1"}) + }) +} + +func TestTxnIterationEdgeCase3(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + kb := []byte("abc") + kc := []byte("acd") + kd := []byte("ade") + + // c1 + txn := db.NewTransaction(true) + txn.Set(kc, []byte("c1")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(1), db.orc.readTs()) + + // b2 + txn = db.NewTransaction(true) + txn.Set(kb, []byte("b2")) + require.NoError(t, txn.Commit()) + require.Equal(t, uint64(2), db.orc.readTs()) + + txn2 := db.NewTransaction(true) + require.NoError(t, txn2.Set(kd, []byte("d2"))) + require.NoError(t, 
txn2.Delete(kc)) + + txn = db.NewTransaction(true) + defer txn.Discard() + rev := DefaultIteratorOptions + rev.Reverse = true + + itr := txn.NewIterator(DefaultIteratorOptions) + itr.Seek([]byte("ab")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ac")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Seek(nil) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ac")) + itr.Rewind() + itr.Seek(nil) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ac")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Close() + + // Keys: "abc", "ade" + // Read pending writes. + itr = txn2.NewIterator(DefaultIteratorOptions) + itr.Seek([]byte("ab")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ac")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kd) + itr.Seek(nil) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ac")) + itr.Rewind() + itr.Seek(nil) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ad")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kd) + itr.Close() + + itr = txn.NewIterator(rev) + itr.Seek([]byte("ac")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ad")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Seek(nil) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Seek([]byte("ac")) + itr.Rewind() + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Seek([]byte("ad")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kc) + itr.Close() + + // Keys: "abc", "ade" + itr = txn2.NewIterator(rev) + itr.Seek([]byte("ad")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Seek([]byte("ae")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kd) + itr.Seek(nil) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kd) + itr.Seek([]byte("ab")) + itr.Rewind() + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kd) + itr.Seek([]byte("ac")) + require.True(t, itr.Valid()) + require.Equal(t, itr.item.Key(), kb) + itr.Close() + }) +} + +func TestIteratorAllVersionsWithDeleted(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Write two keys + err := db.Update(func(txn *Txn) error { + txn.Set([]byte("answer1"), []byte("42")) + txn.Set([]byte("answer2"), []byte("43")) + return nil + }) + require.NoError(t, err) + + // Delete the specific key version from underlying db directly + err = db.View(func(txn *Txn) error { + item, err := txn.Get([]byte("answer1")) + require.NoError(t, err) + err = txn.db.batchSet([]*Entry{ + { + Key: y.KeyWithTs(item.key, item.version), + meta: bitDelete, + }, + }) + require.NoError(t, err) + return err + }) + require.NoError(t, err) + + opts := DefaultIteratorOptions + opts.AllVersions = true + opts.PrefetchValues = false + + // Verify that deleted shows up when AllVersions is set. 
+ err = db.View(func(txn *Txn) error { + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + count++ + item := it.Item() + if count == 1 { + require.Equal(t, []byte("answer1"), item.Key()) + require.True(t, item.meta&bitDelete > 0) + } else { + require.Equal(t, []byte("answer2"), item.Key()) + } + } + require.Equal(t, 2, count) + return nil + }) + require.NoError(t, err) + }) +} + +func TestIteratorAllVersionsWithDeleted2(t *testing.T) { + runBadgerTest(t, nil, func(t *testing.T, db *DB) { + // Set and delete alternatively + for i := 0; i < 4; i++ { + err := db.Update(func(txn *Txn) error { + if i%2 == 0 { + txn.Set([]byte("key"), []byte("value")) + return nil + } + txn.Delete([]byte("key")) + return nil + }) + require.NoError(t, err) + } + + opts := DefaultIteratorOptions + opts.AllVersions = true + opts.PrefetchValues = false + + // Verify that deleted shows up when AllVersions is set. + err := db.View(func(txn *Txn) error { + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + require.Equal(t, []byte("key"), item.Key()) + if count%2 != 0 { + val, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, val, []byte("value")) + } else { + require.True(t, item.meta&bitDelete > 0) + } + count++ + } + require.Equal(t, 4, count) + return nil + }) + require.NoError(t, err) + }) +} + +func TestManagedDB(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := getTestOptions(dir) + opt.managedTxns = true + db, err := Open(opt) + require.NoError(t, err) + defer db.Close() + + key := func(i int) []byte { + return []byte(fmt.Sprintf("key-%02d", i)) + } + + val := func(i int) []byte { + return []byte(fmt.Sprintf("val-%d", i)) + } + + require.Panics(t, func() { + db.Update(func(tx *Txn) error { return nil }) + }) + + err = db.View(func(tx *Txn) error { return nil }) + require.NoError(t, err) + + // Write data at t=3. + txn := db.NewTransactionAt(3, true) + for i := 0; i <= 3; i++ { + require.NoError(t, txn.Set(key(i), val(i))) + } + require.Panics(t, func() { txn.Commit() }) + require.NoError(t, txn.CommitAt(3, nil)) + + // Read data at t=2. + txn = db.NewTransactionAt(2, false) + for i := 0; i <= 3; i++ { + _, err := txn.Get(key(i)) + require.Equal(t, ErrKeyNotFound, err) + } + txn.Discard() + + // Read data at t=3. + txn = db.NewTransactionAt(3, false) + for i := 0; i <= 3; i++ { + item, err := txn.Get(key(i)) + require.NoError(t, err) + require.Equal(t, uint64(3), item.Version()) + v, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, val(i), v) + } + txn.Discard() + + // Write data at t=7. + txn = db.NewTransactionAt(6, true) + for i := 0; i <= 7; i++ { + _, err := txn.Get(key(i)) + if err == nil { + continue // Don't overwrite existing keys. + } + require.NoError(t, txn.Set(key(i), val(i))) + } + require.NoError(t, txn.CommitAt(7, nil)) + + // Read data at t=9. 
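+	// In managed mode visibility is governed purely by the caller-supplied
+	// timestamps: this read at ts=9 should still see the versions written via
+	// CommitAt(3, ...) and CommitAt(7, ...) above, while keys 8 and 9 were never
+	// written and should return ErrKeyNotFound.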
+ txn = db.NewTransactionAt(9, false) + for i := 0; i <= 9; i++ { + item, err := txn.Get(key(i)) + if i <= 7 { + require.NoError(t, err) + } else { + require.Equal(t, ErrKeyNotFound, err) + } + + if i <= 3 { + require.Equal(t, uint64(3), item.Version()) + } else if i <= 7 { + require.Equal(t, uint64(7), item.Version()) + } + if i <= 7 { + v, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, val(i), v) + } + } + txn.Discard() +} + +func TestArmV7Issue311Fix(t *testing.T) { + dir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + config := DefaultOptions + config.TableLoadingMode = options.MemoryMap + config.ValueLogFileSize = 16 << 20 + config.LevelOneSize = 8 << 20 + config.MaxTableSize = 2 << 20 + config.Dir = dir + config.ValueDir = dir + config.SyncWrites = false + + db, err := Open(config) + if err != nil { + t.Fatalf("cannot open db at location %s: %v", dir, err) + } + + err = db.View(func(txn *Txn) error { return nil }) + if err != nil { + t.Fatal(err) + } + + err = db.Update(func(txn *Txn) error { + return txn.Set([]byte{0x11}, []byte{0x22}) + }) + if err != nil { + t.Fatal(err) + } + + err = db.Update(func(txn *Txn) error { + return txn.Set([]byte{0x11}, []byte{0x22}) + }) + + if err != nil { + t.Fatal(err) + } + + if err = db.Close(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/dgraph-io/badger/util.go b/vendor/github.com/dgraph-io/badger/util.go new file mode 100644 index 00000000..e74fc87f --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/util.go @@ -0,0 +1,141 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "sync/atomic" + "time" + + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/table" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// summary is produced when DB is closed. Currently it is used only for testing. +type summary struct { + fileIDs map[uint64]bool +} + +func (s *levelsController) getSummary() *summary { + out := &summary{ + fileIDs: make(map[uint64]bool), + } + for _, l := range s.levels { + l.getSummary(out) + } + return out +} + +func (s *levelHandler) getSummary(sum *summary) { + s.RLock() + defer s.RUnlock() + for _, t := range s.tables { + sum.fileIDs[t.ID()] = true + } +} + +func (s *DB) validate() error { return s.lc.validate() } + +func (s *levelsController) validate() error { + for _, l := range s.levels { + if err := l.validate(); err != nil { + return errors.Wrap(err, "Levels Controller") + } + } + return nil +} + +// Check does some sanity check on one level of data or in-memory index. 
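+// Concretely, for every level above 0 it asserts that each table is internally
+// ordered (Smallest <= Biggest) and that consecutive tables do not overlap
+// (the Biggest key of table j-1 must sort strictly before the Smallest key of
+// table j).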
+func (s *levelHandler) validate() error { + if s.level == 0 { + return nil + } + + s.RLock() + defer s.RUnlock() + numTables := len(s.tables) + for j := 1; j < numTables; j++ { + if j >= len(s.tables) { + return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) + } + + if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 { + return errors.Errorf( + "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d", + hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()), + s.level, j, numTables) + } + + if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 { + return errors.Errorf( + "Intra: %q vs %q: level=%d j=%d numTables=%d", + s.tables[j].Smallest(), s.tables[j].Biggest(), s.level, j, numTables) + } + } + return nil +} + +// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() } + +// // debugPrintMore shows key ranges of each level. +// func (s *levelsController) debugPrintMore() { +// s.Lock() +// defer s.Unlock() +// for i := 0; i < s.kv.opt.MaxLevels; i++ { +// s.levels[i].debugPrintMore() +// } +// } + +// func (s *levelHandler) debugPrintMore() { +// s.RLock() +// defer s.RUnlock() +// s.elog.Printf("Level %d:", s.level) +// for _, t := range s.tables { +// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest()) +// } +// y.Printf("\n") +// } + +// reserveFileID reserves a unique file id. +func (s *levelsController) reserveFileID() uint64 { + id := atomic.AddUint64(&s.nextFileID, 1) + return id - 1 +} + +func getIDMap(dir string) map[uint64]struct{} { + fileInfos, err := ioutil.ReadDir(dir) + y.Check(err) + idMap := make(map[uint64]struct{}) + for _, info := range fileInfos { + if info.IsDir() { + continue + } + fileID, ok := table.ParseFileID(info.Name()) + if !ok { + continue + } + idMap[fileID] = struct{}{} + } + return idMap +} + +func init() { + rand.Seed(time.Now().UnixNano()) +} diff --git a/vendor/github.com/dgraph-io/badger/value.go b/vendor/github.com/dgraph-io/badger/value.go new file mode 100644 index 00000000..3c7511b2 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/value.go @@ -0,0 +1,1314 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "math" + "math/rand" + "os" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// Values have their first byte being byteData or byteDelete. This helps us distinguish between +// a key that has never been seen and a key that has been explicitly deleted. +const ( + bitDelete byte = 1 << 0 // Set if the key has been deleted. 
+ bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key. + bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded. + + // The MSB 2 bits are for transactions. + bitTxn byte = 1 << 6 // Set if the entry is part of a txn. + bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log. + + mi int64 = 1 << 20 +) + +type logFile struct { + path string + // This is a lock on the log file. It guards the fd’s value, the file’s + // existence and the file’s memory map. + // + // Use shared ownership when reading/writing the file or memory map, use + // exclusive ownership to open/close the descriptor, unmap or remove the file. + lock sync.RWMutex + fd *os.File + fid uint32 + fmap []byte + size uint32 + loadingMode options.FileLoadingMode +} + +// openReadOnly assumes that we have a write lock on logFile. +func (lf *logFile) openReadOnly() error { + var err error + lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666) + if err != nil { + return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path) + } + + fi, err := lf.fd.Stat() + if err != nil { + return errors.Wrapf(err, "Unable to check stat for %q", lf.path) + } + y.AssertTrue(fi.Size() <= math.MaxUint32) + lf.size = uint32(fi.Size()) + + if err = lf.mmap(fi.Size()); err != nil { + _ = lf.fd.Close() + return y.Wrapf(err, "Unable to map file") + } + + return nil +} + +func (lf *logFile) mmap(size int64) (err error) { + if lf.loadingMode != options.MemoryMap { + // Nothing to do + return nil + } + lf.fmap, err = y.Mmap(lf.fd, false, size) + if err == nil { + err = y.Madvise(lf.fmap, false) // Disable readahead + } + return err +} + +func (lf *logFile) munmap() (err error) { + if lf.loadingMode != options.MemoryMap { + // Nothing to do + return nil + } + if err := y.Munmap(lf.fmap); err != nil { + return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path) + } + return nil +} + +// Acquire lock on mmap/file if you are calling this +func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) { + var nbr int64 + offset := p.Offset + if lf.loadingMode == options.FileIO { + buf = s.Resize(int(p.Len)) + var n int + n, err = lf.fd.ReadAt(buf, int64(offset)) + nbr = int64(n) + } else { + // Do not convert size to uint32, because the lf.fmap can be of size + // 4GB, which overflows the uint32 during conversion to make the size 0, + // causing the read to fail with ErrEOF. See issue #585. + size := int64(len(lf.fmap)) + valsz := p.Len + if int64(offset) >= size || int64(offset+valsz) > size { + err = y.ErrEOF + } else { + buf = lf.fmap[offset : offset+valsz] + nbr = int64(valsz) + } + } + y.NumReads.Add(1) + y.NumBytesRead.Add(nbr) + return buf, err +} + +func (lf *logFile) doneWriting(offset uint32) error { + // Sync before acquiring lock. (We call this from write() and thus know we have shared access + // to the fd.) + if err := lf.fd.Sync(); err != nil { + return errors.Wrapf(err, "Unable to sync value log: %q", lf.path) + } + // Close and reopen the file read-only. Acquire lock because fd will become invalid for a bit. + // Acquiring the lock is bad because, while we don't hold the lock for a long time, it forces + // one batch of readers wait for the preceding batch of readers to finish. + // + // If there's a benefit to reopening the file read-only, it might be on Windows. I don't know + // what the benefit is. Consider keeping the file read-write, or use fcntl to change + // permissions. 
+ lf.lock.Lock() + defer lf.lock.Unlock() + if err := lf.munmap(); err != nil { + return err + } + // TODO: Confirm if we need to run a file sync after truncation. + // Truncation must run after unmapping, otherwise Windows would crap itself. + if err := lf.fd.Truncate(int64(offset)); err != nil { + return errors.Wrapf(err, "Unable to truncate file: %q", lf.path) + } + if err := lf.fd.Close(); err != nil { + return errors.Wrapf(err, "Unable to close value log: %q", lf.path) + } + + return lf.openReadOnly() +} + +// You must hold lf.lock to sync() +func (lf *logFile) sync() error { + return lf.fd.Sync() +} + +var errStop = errors.New("Stop iteration") +var errTruncate = errors.New("Do truncate") + +type logEntry func(e Entry, vp valuePointer) error + +type safeRead struct { + k []byte + v []byte + + recordOffset uint32 +} + +func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) { + var hbuf [headerBufSize]byte + var err error + + hash := crc32.New(y.CastagnoliCrcTable) + tee := io.TeeReader(reader, hash) + if _, err = io.ReadFull(tee, hbuf[:]); err != nil { + return nil, err + } + + var h header + h.Decode(hbuf[:]) + if h.klen > uint32(1<<16) { // Key length must be below uint16. + return nil, errTruncate + } + kl := int(h.klen) + if cap(r.k) < kl { + r.k = make([]byte, 2*kl) + } + vl := int(h.vlen) + if cap(r.v) < vl { + r.v = make([]byte, 2*vl) + } + + e := &Entry{} + e.offset = r.recordOffset + e.Key = r.k[:kl] + e.Value = r.v[:vl] + + if _, err = io.ReadFull(tee, e.Key); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + if _, err = io.ReadFull(tee, e.Value); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + var crcBuf [4]byte + if _, err = io.ReadFull(reader, crcBuf[:]); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + crc := binary.BigEndian.Uint32(crcBuf[:]) + if crc != hash.Sum32() { + return nil, errTruncate + } + e.meta = h.meta + e.UserMeta = h.userMeta + e.ExpiresAt = h.expiresAt + return e, nil +} + +// iterate iterates over log file. It doesn't not allocate new memory for every kv pair. +// Therefore, the kv pair is only valid for the duration of fn call. +func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) { + fi, err := lf.fd.Stat() + if err != nil { + return 0, err + } + if int64(offset) == fi.Size() { + // We're at the end of the file already. No need to do anything. + return offset, nil + } + if vlog.opt.ReadOnly { + // We're not at the end of the file. We'd need to replay the entries, or + // possibly truncate the file. + return 0, ErrReplayNeeded + } + + // We're not at the end of the file. Let's Seek to the offset and start reading. 
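+	//
+	// Each record decoded below is laid out as
+	//	header (key/value lengths, expiry, userMeta, meta) | key | value | 4-byte CRC32
+	// which is what safeRead.Entry above parses and checksums. As the doc comment
+	// notes, the Entry handed to fn aliases the shared read buffers, so fn must
+	// copy e.Key/e.Value if it needs them after it returns.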
+ if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil { + return 0, errFile(err, lf.path, "Unable to seek") + } + + reader := bufio.NewReader(lf.fd) + read := &safeRead{ + k: make([]byte, 10), + v: make([]byte, 10), + recordOffset: offset, + } + + var lastCommit uint64 + var validEndOffset uint32 + for { + e, err := read.Entry(reader) + if err == io.EOF { + break + } else if err == io.ErrUnexpectedEOF || err == errTruncate { + break + } else if err != nil { + return 0, err + } else if e == nil { + continue + } + + var vp valuePointer + vp.Len = uint32(headerBufSize + len(e.Key) + len(e.Value) + crc32.Size) + read.recordOffset += vp.Len + + vp.Offset = e.offset + vp.Fid = lf.fid + + if e.meta&bitTxn > 0 { + txnTs := y.ParseTs(e.Key) + if lastCommit == 0 { + lastCommit = txnTs + } + if lastCommit != txnTs { + break + } + + } else if e.meta&bitFinTxn > 0 { + txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) + if err != nil || lastCommit != txnTs { + break + } + // Got the end of txn. Now we can store them. + lastCommit = 0 + validEndOffset = read.recordOffset + + } else { + if lastCommit != 0 { + // This is most likely an entry which was moved as part of GC. + // We shouldn't get this entry in the middle of a transaction. + break + } + validEndOffset = read.recordOffset + } + + if err := fn(*e, vp); err != nil { + if err == errStop { + break + } + return 0, errFile(err, lf.path, "Iteration function") + } + } + return validEndOffset, nil +} + +func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error { + maxFid := atomic.LoadUint32(&vlog.maxFid) + y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid) + tr.LazyPrintf("Rewriting fid: %d", f.fid) + + wb := make([]*Entry, 0, 1000) + var size int64 + + y.AssertTrue(vlog.db != nil) + var count, moved int + fe := func(e Entry) error { + count++ + if count%100000 == 0 { + tr.LazyPrintf("Processing entry %d", count) + } + + vs, err := vlog.db.get(e.Key) + if err != nil { + return err + } + if discardEntry(e, vs) { + return nil + } + + // Value is still present in value log. + if len(vs.Value) == 0 { + return errors.Errorf("Empty value: %+v", vs) + } + var vp valuePointer + vp.Decode(vs.Value) + + if vp.Fid > f.fid { + return nil + } + if vp.Offset > e.offset { + return nil + } + if vp.Fid == f.fid && vp.Offset == e.offset { + moved++ + // This new entry only contains the key, and a pointer to the value. + ne := new(Entry) + ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits. + ne.UserMeta = e.UserMeta + + // Create a new key in a separate keyspace, prefixed by moveKey. We are not + // allowed to rewrite an older version of key in the LSM tree, because then this older + // version would be at the top of the LSM tree. To work correctly, reads expect the + // latest versions to be at the top, and the older versions at the bottom. + if bytes.HasPrefix(e.Key, badgerMove) { + ne.Key = append([]byte{}, e.Key...) + } else { + ne.Key = make([]byte, len(badgerMove)+len(e.Key)) + n := copy(ne.Key, badgerMove) + copy(ne.Key[n:], e.Key) + } + + ne.Value = append([]byte{}, e.Value...) + wb = append(wb, ne) + size += int64(e.estimateSize(vlog.opt.ValueThreshold)) + if size >= 64*mi { + tr.LazyPrintf("request has %d entries, size %d", len(wb), size) + if err := vlog.db.batchSet(wb); err != nil { + return err + } + size = 0 + wb = wb[:0] + } + } else { + vlog.db.opt.Warningf("This entry should have been caught. 
%+v\n", e) + } + return nil + } + + _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error { + return fe(e) + }) + if err != nil { + return err + } + + tr.LazyPrintf("request has %d entries, size %d", len(wb), size) + batchSize := 1024 + var loops int + for i := 0; i < len(wb); { + loops++ + if batchSize == 0 { + vlog.db.opt.Warningf("We shouldn't reach batch size of zero.") + return ErrNoRewrite + } + end := i + batchSize + if end > len(wb) { + end = len(wb) + } + if err := vlog.db.batchSet(wb[i:end]); err != nil { + if err == ErrTxnTooBig { + // Decrease the batch size to half. + batchSize = batchSize / 2 + tr.LazyPrintf("Dropped batch size to %d", batchSize) + continue + } + return err + } + i += batchSize + } + tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops) + tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved) + tr.LazyPrintf("Removing fid: %d", f.fid) + var deleteFileNow bool + // Entries written to LSM. Remove the older file now. + { + vlog.filesLock.Lock() + // Just a sanity-check. + if _, ok := vlog.filesMap[f.fid]; !ok { + vlog.filesLock.Unlock() + return errors.Errorf("Unable to find fid: %d", f.fid) + } + if vlog.iteratorCount() == 0 { + delete(vlog.filesMap, f.fid) + deleteFileNow = true + } else { + vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid) + } + vlog.filesLock.Unlock() + } + + if deleteFileNow { + vlog.deleteLogFile(f) + } + + return nil +} + +func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error { + db := vlog.db + var result []*Entry + var count, pointers uint64 + tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid) + err := db.View(func(txn *Txn) error { + opt := DefaultIteratorOptions + opt.internalAccess = true + opt.PrefetchValues = false + itr := txn.NewIterator(opt) + defer itr.Close() + + for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() { + count++ + item := itr.Item() + if item.meta&bitValuePointer == 0 { + continue + } + pointers++ + var vp valuePointer + vp.Decode(item.vptr) + if vp.Fid == fid { + e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete} + result = append(result, e) + } + } + return nil + }) + if err != nil { + tr.LazyPrintf("Got error while iterating move keys: %v", err) + tr.SetError() + return err + } + tr.LazyPrintf("Num total move keys: %d. 
Num pointers: %d", count, pointers) + tr.LazyPrintf("Number of invalid move keys found: %d", len(result)) + batchSize := 10240 + for i := 0; i < len(result); { + end := i + batchSize + if end > len(result) { + end = len(result) + } + if err := db.batchSet(result[i:end]); err != nil { + if err == ErrTxnTooBig { + batchSize /= 2 + tr.LazyPrintf("Dropped batch size to %d", batchSize) + continue + } + tr.LazyPrintf("Error while doing batchSet: %v", err) + tr.SetError() + return err + } + i += batchSize + } + tr.LazyPrintf("Move keys deletion done.") + return nil +} + +func (vlog *valueLog) incrIteratorCount() { + atomic.AddInt32(&vlog.numActiveIterators, 1) +} + +func (vlog *valueLog) iteratorCount() int { + return int(atomic.LoadInt32(&vlog.numActiveIterators)) +} + +func (vlog *valueLog) decrIteratorCount() error { + num := atomic.AddInt32(&vlog.numActiveIterators, -1) + if num != 0 { + return nil + } + + vlog.filesLock.Lock() + lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted)) + for _, id := range vlog.filesToBeDeleted { + lfs = append(lfs, vlog.filesMap[id]) + delete(vlog.filesMap, id) + } + vlog.filesToBeDeleted = nil + vlog.filesLock.Unlock() + + for _, lf := range lfs { + if err := vlog.deleteLogFile(lf); err != nil { + return err + } + } + return nil +} + +func (vlog *valueLog) deleteLogFile(lf *logFile) error { + path := vlog.fpath(lf.fid) + if err := lf.munmap(); err != nil { + _ = lf.fd.Close() + return err + } + if err := lf.fd.Close(); err != nil { + return err + } + return os.Remove(path) +} + +func (vlog *valueLog) dropAll() (int, error) { + // We don't want to block dropAll on any pending transactions. So, don't worry about iterator + // count. + var count int + deleteAll := func() error { + vlog.filesLock.Lock() + defer vlog.filesLock.Unlock() + for _, lf := range vlog.filesMap { + if err := vlog.deleteLogFile(lf); err != nil { + return err + } + count++ + } + vlog.filesMap = make(map[uint32]*logFile) + return nil + } + if err := deleteAll(); err != nil { + return count, err + } + + vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0") + if _, err := vlog.createVlogFile(0); err != nil { + return count, err + } + atomic.StoreUint32(&vlog.maxFid, 0) + return count, nil +} + +// lfDiscardStats keeps track of the amount of data that could be discarded for +// a given logfile. +type lfDiscardStats struct { + sync.Mutex + m map[uint32]int64 +} + +type valueLog struct { + dirPath string + elog trace.EventLog + + // guards our view of which files exist, which to be deleted, how many active iterators + filesLock sync.RWMutex + filesMap map[uint32]*logFile + filesToBeDeleted []uint32 + // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted. + numActiveIterators int32 + + db *DB + maxFid uint32 // accessed via atomics. + writableLogOffset uint32 // read by read, written by write. Must access via atomics. 
+ numEntriesWritten uint32 + opt Options + + garbageCh chan struct{} + lfDiscardStats *lfDiscardStats +} + +func vlogFilePath(dirPath string, fid uint32) string { + return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid) +} + +func (vlog *valueLog) fpath(fid uint32) string { + return vlogFilePath(vlog.dirPath, fid) +} + +func (vlog *valueLog) populateFilesMap() error { + vlog.filesMap = make(map[uint32]*logFile) + + files, err := ioutil.ReadDir(vlog.dirPath) + if err != nil { + return errFile(err, vlog.dirPath, "Unable to open log dir.") + } + + found := make(map[uint64]struct{}) + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".vlog") { + continue + } + fsz := len(file.Name()) + fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32) + if err != nil { + return errFile(err, file.Name(), "Unable to parse log id.") + } + if _, ok := found[fid]; ok { + return errFile(err, file.Name(), "Duplicate file found. Please delete one.") + } + found[fid] = struct{}{} + + lf := &logFile{ + fid: uint32(fid), + path: vlog.fpath(uint32(fid)), + loadingMode: vlog.opt.ValueLogLoadingMode, + } + vlog.filesMap[uint32(fid)] = lf + if vlog.maxFid < uint32(fid) { + vlog.maxFid = uint32(fid) + } + } + return nil +} + +func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) { + path := vlog.fpath(fid) + lf := &logFile{ + fid: fid, + path: path, + loadingMode: vlog.opt.ValueLogLoadingMode, + } + // writableLogOffset is only written by write func, by read by Read func. + // To avoid a race condition, all reads and updates to this variable must be + // done via atomics. + atomic.StoreUint32(&vlog.writableLogOffset, 0) + vlog.numEntriesWritten = 0 + + var err error + if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil { + return nil, errFile(err, lf.path, "Create value log file") + } + if err = syncDir(vlog.dirPath); err != nil { + return nil, errFile(err, vlog.dirPath, "Sync value log dir") + } + if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil { + return nil, errFile(err, lf.path, "Mmap value log file") + } + + vlog.filesLock.Lock() + vlog.filesMap[fid] = lf + vlog.filesLock.Unlock() + + return lf, nil +} + +func errFile(err error, path string, msg string) error { + return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err) +} + +func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error { + // We should open the file in RW mode, so it can be truncated. + var err error + lf.fd, err = os.OpenFile(lf.path, os.O_RDWR, 0) + if err != nil { + return errFile(err, lf.path, "Open file in RW mode") + } + defer lf.fd.Close() + + fi, err := lf.fd.Stat() + if err != nil { + return errFile(err, lf.path, "Unable to run file.Stat") + } + + // Alright, let's iterate now. + endOffset, err := vlog.iterate(lf, offset, replayFn) + if err != nil { + return errFile(err, lf.path, "Unable to replay logfile") + } + if int64(endOffset) == fi.Size() { + return nil + } + + // End offset is different from file size. So, we should truncate the file + // to that size. + y.AssertTrue(int64(endOffset) <= fi.Size()) + if !vlog.opt.Truncate { + return ErrTruncateNeeded + } + + if err := lf.fd.Truncate(int64(endOffset)); err != nil { + return errFile(err, lf.path, fmt.Sprintf( + "Truncation needed at offset %d. 
Can be done manually as well.", endOffset)) + } + return nil +} + +func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error { + opt := db.opt + vlog.opt = opt + vlog.dirPath = opt.ValueDir + vlog.db = db + vlog.elog = trace.NewEventLog("Badger", "Valuelog") + vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time. + vlog.lfDiscardStats = &lfDiscardStats{m: make(map[uint32]int64)} + + if err := vlog.populateFilesMap(); err != nil { + return err + } + // If no files are found, then create a new file. + if len(vlog.filesMap) == 0 { + _, err := vlog.createVlogFile(0) + return err + } + + fids := vlog.sortedFids() + for _, fid := range fids { + lf, ok := vlog.filesMap[fid] + y.AssertTrue(ok) + + // This file is before the value head pointer. So, we don't need to + // replay it, and can just open it in readonly mode. + if fid < ptr.Fid { + if err := lf.openReadOnly(); err != nil { + return err + } + continue + } + + var offset uint32 + if fid == ptr.Fid { + offset = ptr.Offset + ptr.Len + } + vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset) + now := time.Now() + // Replay and possible truncation done. Now we can open the file as per + // user specified options. + if err := vlog.replayLog(lf, offset, replayFn); err != nil { + return err + } + vlog.db.opt.Infof("Replay took: %s\n", time.Since(now)) + + if fid < vlog.maxFid { + if err := lf.openReadOnly(); err != nil { + return err + } + } else { + var flags uint32 + switch { + case vlog.opt.ReadOnly: + // If we have read only, we don't need SyncWrites. + flags |= y.ReadOnly + case vlog.opt.SyncWrites: + flags |= y.Sync + } + var err error + if lf.fd, err = y.OpenExistingFile(vlog.fpath(fid), flags); err != nil { + return errFile(err, lf.path, "Open existing file") + } + } + } + + // Seek to the end to start writing. + last, ok := vlog.filesMap[vlog.maxFid] + y.AssertTrue(ok) + lastOffset, err := last.fd.Seek(0, io.SeekEnd) + if err != nil { + return errFile(err, last.path, "file.Seek to end") + } + vlog.writableLogOffset = uint32(lastOffset) + + // Update the head to point to the updated tail. Otherwise, even after doing a successful + // replay and closing the DB, the value log head does not get updated, which causes the replay + // to happen repeatedly. + vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)} + + // Map the file if needed. When we create a file, it is automatically mapped. + if err = last.mmap(2 * opt.ValueLogFileSize); err != nil { + return errFile(err, last.path, "Map log file") + } + return nil +} + +func (vlog *valueLog) Close() error { + vlog.elog.Printf("Stopping garbage collection of values.") + defer vlog.elog.Finish() + + var err error + for id, f := range vlog.filesMap { + f.lock.Lock() // We won’t release the lock. + if munmapErr := f.munmap(); munmapErr != nil && err == nil { + err = munmapErr + } + + maxFid := atomic.LoadUint32(&vlog.maxFid) + if !vlog.opt.ReadOnly && id == maxFid { + // truncate writable log file to correct offset. + if truncErr := f.fd.Truncate( + int64(vlog.woffset())); truncErr != nil && err == nil { + err = truncErr + } + } + + if closeErr := f.fd.Close(); closeErr != nil && err == nil { + err = closeErr + } + } + return err +} + +// sortedFids returns the file id's not pending deletion, sorted. Assumes we have shared access to +// filesMap. 
+func (vlog *valueLog) sortedFids() []uint32 { + toBeDeleted := make(map[uint32]struct{}) + for _, fid := range vlog.filesToBeDeleted { + toBeDeleted[fid] = struct{}{} + } + ret := make([]uint32, 0, len(vlog.filesMap)) + for fid := range vlog.filesMap { + if _, ok := toBeDeleted[fid]; !ok { + ret = append(ret, fid) + } + } + sort.Slice(ret, func(i, j int) bool { + return ret[i] < ret[j] + }) + return ret +} + +type request struct { + // Input values + Entries []*Entry + // Output values and wait group stuff below + Ptrs []valuePointer + Wg sync.WaitGroup + Err error +} + +func (req *request) Wait() error { + req.Wg.Wait() + req.Entries = nil + err := req.Err + requestPool.Put(req) + return err +} + +// sync is thread-unsafe and should not be called concurrently with write. +func (vlog *valueLog) sync() error { + if vlog.opt.SyncWrites { + return nil + } + + vlog.filesLock.RLock() + if len(vlog.filesMap) == 0 { + vlog.filesLock.RUnlock() + return nil + } + maxFid := atomic.LoadUint32(&vlog.maxFid) + curlf := vlog.filesMap[maxFid] + curlf.lock.RLock() + vlog.filesLock.RUnlock() + + dirSyncCh := make(chan error) + go func() { dirSyncCh <- syncDir(vlog.opt.ValueDir) }() + err := curlf.sync() + curlf.lock.RUnlock() + dirSyncErr := <-dirSyncCh + if err != nil { + err = dirSyncErr + } + return err +} + +func (vlog *valueLog) woffset() uint32 { + return atomic.LoadUint32(&vlog.writableLogOffset) +} + +// write is thread-unsafe by design and should not be called concurrently. +func (vlog *valueLog) write(reqs []*request) error { + vlog.filesLock.RLock() + maxFid := atomic.LoadUint32(&vlog.maxFid) + curlf := vlog.filesMap[maxFid] + vlog.filesLock.RUnlock() + + var buf bytes.Buffer + toDisk := func() error { + if buf.Len() == 0 { + return nil + } + vlog.elog.Printf("Flushing %d blocks of total size: %d", len(reqs), buf.Len()) + n, err := curlf.fd.Write(buf.Bytes()) + if err != nil { + return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path) + } + buf.Reset() + y.NumWrites.Add(1) + y.NumBytesWritten.Add(int64(n)) + vlog.elog.Printf("Done") + atomic.AddUint32(&vlog.writableLogOffset, uint32(n)) + + if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) || + vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries { + var err error + if err = curlf.doneWriting(vlog.woffset()); err != nil { + return err + } + + newid := atomic.AddUint32(&vlog.maxFid, 1) + y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid) + newlf, err := vlog.createVlogFile(newid) + if err != nil { + return err + } + curlf = newlf + } + return nil + } + + for i := range reqs { + b := reqs[i] + b.Ptrs = b.Ptrs[:0] + for j := range b.Entries { + e := b.Entries[j] + var p valuePointer + + p.Fid = curlf.fid + // Use the offset including buffer length so far. + p.Offset = vlog.woffset() + uint32(buf.Len()) + plen, err := encodeEntry(e, &buf) // Now encode the entry into buffer. + if err != nil { + return err + } + p.Len = uint32(plen) + b.Ptrs = append(b.Ptrs, p) + } + vlog.numEntriesWritten += uint32(len(b.Entries)) + // We write to disk here so that all entries that are part of the same transaction are + // written to the same vlog file. + writeNow := + vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) || + vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries) + if writeNow { + if err := toDisk(); err != nil { + return err + } + } + } + return toDisk() +} + +// Gets the logFile and acquires and RLock() for the mmap. 
You must call RUnlock on the file +// (if non-nil) +func (vlog *valueLog) getFileRLocked(fid uint32) (*logFile, error) { + vlog.filesLock.RLock() + defer vlog.filesLock.RUnlock() + ret, ok := vlog.filesMap[fid] + if !ok { + // log file has gone away, will need to retry the operation. + return nil, ErrRetry + } + ret.lock.RLock() + return ret, nil +} + +// Read reads the value log at a given location. +// TODO: Make this read private. +func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) { + // Check for valid offset if we are reading to writable log. + maxFid := atomic.LoadUint32(&vlog.maxFid) + if vp.Fid == maxFid && vp.Offset >= vlog.woffset() { + return nil, nil, errors.Errorf( + "Invalid value pointer offset: %d greater than current offset: %d", + vp.Offset, vlog.woffset()) + } + + buf, cb, err := vlog.readValueBytes(vp, s) + if err != nil { + return nil, cb, err + } + var h header + h.Decode(buf) + n := uint32(headerBufSize) + h.klen + return buf[n : n+h.vlen], cb, nil +} + +func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, func(), error) { + lf, err := vlog.getFileRLocked(vp.Fid) + if err != nil { + return nil, nil, err + } + + buf, err := lf.read(vp, s) + if vlog.opt.ValueLogLoadingMode == options.MemoryMap { + return buf, lf.lock.RUnlock, err + } + // If we are using File I/O we unlock the file immediately + // and return an empty function as callback. + lf.lock.RUnlock() + return buf, nil, err +} + +// Test helper +func valueBytesToEntry(buf []byte) (e Entry) { + var h header + h.Decode(buf) + n := uint32(headerBufSize) + + e.Key = buf[n : n+h.klen] + n += h.klen + e.meta = h.meta + e.UserMeta = h.userMeta + e.Value = buf[n : n+h.vlen] + return +} + +func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) { + vlog.filesLock.RLock() + defer vlog.filesLock.RUnlock() + fids := vlog.sortedFids() + if len(fids) <= 1 { + tr.LazyPrintf("Only one or less value log file.") + return nil + } else if head.Fid == 0 { + tr.LazyPrintf("Head pointer is at zero.") + return nil + } + + // Pick a candidate that contains the largest amount of discardable data + candidate := struct { + fid uint32 + discard int64 + }{math.MaxUint32, 0} + vlog.lfDiscardStats.Lock() + for _, fid := range fids { + if fid >= head.Fid { + break + } + if vlog.lfDiscardStats.m[fid] > candidate.discard { + candidate.fid = fid + candidate.discard = vlog.lfDiscardStats.m[fid] + } + } + vlog.lfDiscardStats.Unlock() + + if candidate.fid != math.MaxUint32 { // Found a candidate + tr.LazyPrintf("Found candidate via discard stats: %v", candidate) + files = append(files, vlog.filesMap[candidate.fid]) + } else { + tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.") + } + + // Fallback to randomly picking a log file + var idxHead int + for i, fid := range fids { + if fid == head.Fid { + idxHead = i + break + } + } + if idxHead == 0 { // Not found or first file + tr.LazyPrintf("Could not find any file.") + return nil + } + idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it. + if idx > 0 { + idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids. + } + tr.LazyPrintf("Randomly chose fid: %d", fids[idx]) + files = append(files, vlog.filesMap[fids[idx]]) + return files +} + +func discardEntry(e Entry, vs y.ValueStruct) bool { + if vs.Version != y.ParseTs(e.Key) { + // Version not found. Discard. 
+ return true + } + if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { + return true + } + if (vs.Meta & bitValuePointer) == 0 { + // Key also stores the value in LSM. Discard. + return true + } + if (vs.Meta & bitFinTxn) > 0 { + // Just a txn finish entry. Discard. + return true + } + return false +} + +func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) { + // Update stats before exiting + defer func() { + if err == nil { + vlog.lfDiscardStats.Lock() + delete(vlog.lfDiscardStats.m, lf.fid) + vlog.lfDiscardStats.Unlock() + } + }() + + type reason struct { + total float64 + discard float64 + count int + } + + fi, err := lf.fd.Stat() + if err != nil { + tr.LazyPrintf("Error while finding file size: %v", err) + tr.SetError() + return err + } + + // Set up the sampling window sizes. + sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window. + countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries. + tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow) + + // Pick a random start point for the log. + skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location. + skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window. + skipFirstM /= float64(mi) // Convert to MBs. + tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi) + var skipped float64 + + var r reason + start := time.Now() + y.AssertTrue(vlog.db != nil) + s := new(y.Slice) + var numIterations int + _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error { + numIterations++ + esz := float64(vp.Len) / (1 << 20) // in MBs. + if skipped < skipFirstM { + skipped += esz + return nil + } + + // Sample until we reach the window sizes or exceed 10 seconds. + if r.count > countWindow { + tr.LazyPrintf("Stopping sampling after %d entries.", countWindow) + return errStop + } + if r.total > sizeWindow { + tr.LazyPrintf("Stopping sampling after reaching window size.") + return errStop + } + if time.Since(start) > 10*time.Second { + tr.LazyPrintf("Stopping sampling after 10 seconds.") + return errStop + } + r.total += esz + r.count++ + + vs, err := vlog.db.get(e.Key) + if err != nil { + return err + } + if discardEntry(e, vs) { + r.discard += esz + return nil + } + + // Value is still present in value log. + y.AssertTrue(len(vs.Value) > 0) + vp.Decode(vs.Value) + + if vp.Fid > lf.fid { + // Value is present in a later log. Discard. + r.discard += esz + return nil + } + if vp.Offset > e.offset { + // Value is present in a later offset, but in the same log. + r.discard += esz + return nil + } + if vp.Fid == lf.fid && vp.Offset == e.offset { + // This is still the active entry. This would need to be rewritten. + + } else { + vlog.elog.Printf("Reason=%+v\n", r) + + buf, cb, err := vlog.readValueBytes(vp, s) + if err != nil { + return errStop + } + ne := valueBytesToEntry(buf) + ne.offset = vp.Offset + ne.print("Latest Entry Header in LSM") + e.print("Latest Entry in Log") + runCallback(cb) + return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. Meta:%v.", + vp, vs.Meta) + } + return nil + }) + + if err != nil { + tr.LazyPrintf("Error while iterating for RunGC: %v", err) + tr.SetError() + return err + } + tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. 
Data status=%+v\n", + lf.fid, skipped, numIterations, r) + + // If we couldn't sample at least a 1000 KV pairs or at least 75% of the window size, + // and what we can discard is below the threshold, we should skip the rewrite. + if (r.count < countWindow && r.total < sizeWindow*0.75) || r.discard < discardRatio*r.total { + tr.LazyPrintf("Skipping GC on fid: %d", lf.fid) + return ErrNoRewrite + } + if err = vlog.rewrite(lf, tr); err != nil { + return err + } + tr.LazyPrintf("Done rewriting.") + return nil +} + +func (vlog *valueLog) waitOnGC(lc *y.Closer) { + defer lc.Done() + + <-lc.HasBeenClosed() // Wait for lc to be closed. + + // Block any GC in progress to finish, and don't allow any more writes to runGC by filling up + // the channel of size 1. + vlog.garbageCh <- struct{}{} +} + +func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error { + select { + case vlog.garbageCh <- struct{}{}: + // Pick a log file for GC. + tr := trace.New("Badger.ValueLog", "GC") + tr.SetMaxEvents(100) + defer func() { + tr.Finish() + <-vlog.garbageCh + }() + + var err error + files := vlog.pickLog(head, tr) + if len(files) == 0 { + tr.LazyPrintf("PickLog returned zero results.") + return ErrNoRewrite + } + tried := make(map[uint32]bool) + for _, lf := range files { + if _, done := tried[lf.fid]; done { + continue + } + tried[lf.fid] = true + err = vlog.doRunGC(lf, discardRatio, tr) + if err == nil { + return vlog.deleteMoveKeysFor(lf.fid, tr) + } + } + return err + default: + return ErrRejected + } +} + +func (vlog *valueLog) updateGCStats(stats map[uint32]int64) { + vlog.lfDiscardStats.Lock() + for fid, sz := range stats { + vlog.lfDiscardStats.m[fid] += sz + } + vlog.lfDiscardStats.Unlock() +} diff --git a/vendor/github.com/dgraph-io/badger/value_test.go b/vendor/github.com/dgraph-io/badger/value_test.go new file mode 100644 index 00000000..b32d583c --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/value_test.go @@ -0,0 +1,880 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package badger + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "sync" + "testing" + + "github.com/stretchr/testify/require" + humanize "gx/ipfs/QmQMxG9D52TirZd9eLA37nxiNspnMRkKbyPWrVAa1gvtSy/go-humanize" + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/options" + "gx/ipfs/QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd/badger/y" +) + +func TestValueBasic(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + y.Check(err) + defer os.RemoveAll(dir) + + kv, _ := Open(getTestOptions(dir)) + defer kv.Close() + log := &kv.vlog + + // Use value big enough that the value log writes them even if SyncWrites is false. 
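The skip-or-rewrite decision at the end of `doRunGC` above reduces to two questions: did we sample enough to trust the numbers, and is enough of the sample discardable. Here is a standalone restatement of that rule; the names are invented for illustration and the 0.75 factor mirrors the check shown above.

```go
package main

import "fmt"

// shouldRewrite restates the decision at the end of doRunGC: skip unless the
// sample was big enough (by count or by size) AND the discardable fraction
// clears the caller-supplied discardRatio.
func shouldRewrite(sampledCount, countWindow int, sampledMB, sizeWindowMB, discardMB, discardRatio float64) bool {
	sampleTooSmall := sampledCount < countWindow && sampledMB < sizeWindowMB*0.75
	notEnoughGarbage := discardMB < discardRatio*sampledMB
	return !(sampleTooSmall || notEnoughGarbage)
}

func main() {
	// Sampled plenty of entries and more than half of the sampled data is discardable: rewrite.
	fmt.Println(shouldRewrite(1200, 1000, 12.0, 100.0, 7.0, 0.5)) // true
	// Sample is below both windows: skip, regardless of how much looks discardable.
	fmt.Println(shouldRewrite(10, 1000, 1.0, 100.0, 0.9, 0.5)) // false
}
```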
+ const val1 = "sampleval012345678901234567890123" + const val2 = "samplevalb012345678901234567890123" + require.True(t, len(val1) >= kv.opt.ValueThreshold) + + e := &Entry{ + Key: []byte("samplekey"), + Value: []byte(val1), + meta: bitValuePointer, + } + e2 := &Entry{ + Key: []byte("samplekeyb"), + Value: []byte(val2), + meta: bitValuePointer, + } + + b := new(request) + b.Entries = []*Entry{e, e2} + + log.write([]*request{b}) + require.Len(t, b.Ptrs, 2) + t.Logf("Pointer written: %+v %+v\n", b.Ptrs[0], b.Ptrs[1]) + + s := new(y.Slice) + buf1, cb1, err1 := log.readValueBytes(b.Ptrs[0], s) + buf2, cb2, err2 := log.readValueBytes(b.Ptrs[1], s) + require.NoError(t, err1) + require.NoError(t, err2) + defer runCallback(cb1) + defer runCallback(cb2) + + readEntries := []Entry{valueBytesToEntry(buf1), valueBytesToEntry(buf2)} + require.EqualValues(t, []Entry{ + { + Key: []byte("samplekey"), + Value: []byte(val1), + meta: bitValuePointer, + }, + { + Key: []byte("samplekeyb"), + Value: []byte(val2), + meta: bitValuePointer, + }, + }, readEntries) + +} + +func TestValueGCManaged(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + N := 10000 + opt := getTestOptions(dir) + opt.ValueLogMaxEntries = uint32(N / 10) + opt.managedTxns = true + db, err := Open(opt) + require.NoError(t, err) + defer db.Close() + + var ts uint64 + newTs := func() uint64 { + ts++ + return ts + } + + sz := 64 << 10 + var wg sync.WaitGroup + for i := 0; i < N; i++ { + v := make([]byte, sz) + rand.Read(v[:rand.Intn(sz)]) + + wg.Add(1) + txn := db.NewTransactionAt(newTs(), true) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v)) + require.NoError(t, txn.CommitAt(newTs(), func(err error) { + wg.Done() + require.NoError(t, err) + })) + } + + for i := 0; i < N; i++ { + wg.Add(1) + txn := db.NewTransactionAt(newTs(), true) + require.NoError(t, txn.Delete([]byte(fmt.Sprintf("key%d", i)))) + require.NoError(t, txn.CommitAt(newTs(), func(err error) { + wg.Done() + require.NoError(t, err) + })) + } + wg.Wait() + files, err := ioutil.ReadDir(dir) + require.NoError(t, err) + for _, fi := range files { + t.Logf("File: %s. Size: %s\n", fi.Name(), humanize.Bytes(uint64(fi.Size()))) + } + + for i := 0; i < 100; i++ { + // Try at max 100 times to GC even a single value log file. 
+ if err := db.RunValueLogGC(0.0001); err == nil { + return // Done + } + } + require.Fail(t, "Unable to GC even a single value log file.") +} + +func TestValueGC(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := getTestOptions(dir) + opt.ValueLogFileSize = 1 << 20 + + kv, _ := Open(opt) + defer kv.Close() + + sz := 32 << 10 + txn := kv.NewTransaction(true) + for i := 0; i < 100; i++ { + v := make([]byte, sz) + rand.Read(v[:rand.Intn(sz)]) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v)) + if i%20 == 0 { + require.NoError(t, txn.Commit()) + txn = kv.NewTransaction(true) + } + } + require.NoError(t, txn.Commit()) + + for i := 0; i < 45; i++ { + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) + } + + kv.vlog.filesLock.RLock() + lf := kv.vlog.filesMap[kv.vlog.sortedFids()[0]] + kv.vlog.filesLock.RUnlock() + + // lf.iterate(0, func(e Entry) bool { + // e.print("lf") + // return true + // }) + + tr := trace.New("Test", "Test") + defer tr.Finish() + kv.vlog.rewrite(lf, tr) + for i := 45; i < 100; i++ { + key := []byte(fmt.Sprintf("key%d", i)) + + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get(key) + require.NoError(t, err) + val := getItemValue(t, item) + require.NotNil(t, val) + require.True(t, len(val) == sz, "Size found: %d", len(val)) + return nil + })) + } +} + +func TestValueGC2(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := getTestOptions(dir) + opt.ValueLogFileSize = 1 << 20 + + kv, _ := Open(opt) + defer kv.Close() + + sz := 32 << 10 + txn := kv.NewTransaction(true) + for i := 0; i < 100; i++ { + v := make([]byte, sz) + rand.Read(v[:rand.Intn(sz)]) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v)) + if i%20 == 0 { + require.NoError(t, txn.Commit()) + txn = kv.NewTransaction(true) + } + } + require.NoError(t, txn.Commit()) + + for i := 0; i < 5; i++ { + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) + } + + for i := 5; i < 10; i++ { + v := []byte(fmt.Sprintf("value%d", i)) + txnSet(t, kv, []byte(fmt.Sprintf("key%d", i)), v, 0) + } + + kv.vlog.filesLock.RLock() + lf := kv.vlog.filesMap[kv.vlog.sortedFids()[0]] + kv.vlog.filesLock.RUnlock() + + // lf.iterate(0, func(e Entry) bool { + // e.print("lf") + // return true + // }) + + tr := trace.New("Test", "Test") + defer tr.Finish() + kv.vlog.rewrite(lf, tr) + for i := 0; i < 5; i++ { + key := []byte(fmt.Sprintf("key%d", i)) + require.NoError(t, kv.View(func(txn *Txn) error { + _, err := txn.Get(key) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) + } + for i := 5; i < 10; i++ { + key := []byte(fmt.Sprintf("key%d", i)) + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get(key) + require.NoError(t, err) + val := getItemValue(t, item) + require.NotNil(t, val) + require.Equal(t, string(val), fmt.Sprintf("value%d", i)) + return nil + })) + } + for i := 10; i < 100; i++ { + key := []byte(fmt.Sprintf("key%d", i)) + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get(key) + require.NoError(t, err) + val := getItemValue(t, item) + require.NotNil(t, val) + require.True(t, len(val) == sz, "Size found: %d", len(val)) + return nil + })) + } +} + +func TestValueGC3(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := getTestOptions(dir) + opt.ValueLogFileSize = 1 << 20 + + kv, err := Open(opt) + require.NoError(t, err) + defer 
kv.Close() + + // We want to test whether an iterator can continue through a value log GC. + + valueSize := 32 << 10 + + var value3 []byte + txn := kv.NewTransaction(true) + for i := 0; i < 100; i++ { + v := make([]byte, valueSize) // 32K * 100 will take >=3'276'800 B. + if i == 3 { + value3 = v + } + rand.Read(v[:]) + // Keys key000, key001, key002, such that sorted order matches insertion order + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%03d", i)), v)) + if i%20 == 0 { + require.NoError(t, txn.Commit()) + txn = kv.NewTransaction(true) + } + } + require.NoError(t, txn.Commit()) + + // Start an iterator to keys in the first value log file + itOpt := IteratorOptions{ + PrefetchValues: false, + PrefetchSize: 0, + Reverse: false, + } + + txn = kv.NewTransaction(true) + it := txn.NewIterator(itOpt) + defer it.Close() + // Walk a few keys + it.Rewind() + require.True(t, it.Valid()) + item := it.Item() + require.Equal(t, []byte("key000"), item.Key()) + it.Next() + require.True(t, it.Valid()) + item = it.Item() + require.Equal(t, []byte("key001"), item.Key()) + it.Next() + require.True(t, it.Valid()) + item = it.Item() + require.Equal(t, []byte("key002"), item.Key()) + + // Like other tests, we pull out a logFile to rewrite it directly + + kv.vlog.filesLock.RLock() + logFile := kv.vlog.filesMap[kv.vlog.sortedFids()[0]] + kv.vlog.filesLock.RUnlock() + + tr := trace.New("Test", "Test") + defer tr.Finish() + kv.vlog.rewrite(logFile, tr) + it.Next() + require.True(t, it.Valid()) + item = it.Item() + require.Equal(t, []byte("key003"), item.Key()) + + v3, err := item.ValueCopy(nil) + require.NoError(t, err) + require.Equal(t, value3, v3) +} + +func TestValueGC4(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := getTestOptions(dir) + opt.ValueLogFileSize = 1 << 20 + opt.Truncate = true + + kv, err := Open(opt) + require.NoError(t, err) + defer kv.Close() + + sz := 128 << 10 // 5 entries per value log file. 
+ txn := kv.NewTransaction(true) + for i := 0; i < 24; i++ { + v := make([]byte, sz) + rand.Read(v[:rand.Intn(sz)]) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v)) + if i%3 == 0 { + require.NoError(t, txn.Commit()) + txn = kv.NewTransaction(true) + } + } + require.NoError(t, txn.Commit()) + + for i := 0; i < 8; i++ { + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) + } + + for i := 8; i < 16; i++ { + v := []byte(fmt.Sprintf("value%d", i)) + txnSet(t, kv, []byte(fmt.Sprintf("key%d", i)), v, 0) + } + + kv.vlog.filesLock.RLock() + lf0 := kv.vlog.filesMap[kv.vlog.sortedFids()[0]] + lf1 := kv.vlog.filesMap[kv.vlog.sortedFids()[1]] + kv.vlog.filesLock.RUnlock() + + // lf.iterate(0, func(e Entry) bool { + // e.print("lf") + // return true + // }) + + tr := trace.New("Test", "Test") + defer tr.Finish() + kv.vlog.rewrite(lf0, tr) + kv.vlog.rewrite(lf1, tr) + + err = kv.vlog.Close() + require.NoError(t, err) + + err = kv.vlog.open(kv, valuePointer{Fid: 2}, kv.replayFunction()) + require.NoError(t, err) + + for i := 0; i < 8; i++ { + key := []byte(fmt.Sprintf("key%d", i)) + require.NoError(t, kv.View(func(txn *Txn) error { + _, err := txn.Get(key) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) + } + for i := 8; i < 16; i++ { + key := []byte(fmt.Sprintf("key%d", i)) + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get(key) + require.NoError(t, err) + val := getItemValue(t, item) + require.NotNil(t, val) + require.Equal(t, string(val), fmt.Sprintf("value%d", i)) + return nil + })) + } +} + +func TestChecksums(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + // Set up SST with K1=V1 + opts := getTestOptions(dir) + opts.Truncate = true + opts.ValueLogFileSize = 100 * 1024 * 1024 // 100Mb + kv, err := Open(opts) + require.NoError(t, err) + require.NoError(t, kv.Close()) + + var ( + k0 = []byte("k0") + k1 = []byte("k1") + k2 = []byte("k2") + k3 = []byte("k3") + v0 = []byte("value0-012345678901234567890123012345678901234567890123") + v1 = []byte("value1-012345678901234567890123012345678901234567890123") + v2 = []byte("value2-012345678901234567890123012345678901234567890123") + v3 = []byte("value3-012345678901234567890123012345678901234567890123") + ) + // Make sure the value log would actually store the item + require.True(t, len(v0) >= kv.opt.ValueThreshold) + + // Use a vlog with K0=V0 and a (corrupted) second transaction(k1,k2) + buf := createVlog(t, []*Entry{ + {Key: k0, Value: v0}, + {Key: k1, Value: v1}, + {Key: k2, Value: v2}, + }) + buf[len(buf)-1]++ // Corrupt last byte + require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777)) + + // K1 should exist, but K2 shouldn't. + kv, err = Open(opts) + require.NoError(t, err) + + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get(k0) + require.NoError(t, err) + require.Equal(t, getItemValue(t, item), v0) + + _, err = txn.Get(k1) + require.Equal(t, ErrKeyNotFound, err) + + _, err = txn.Get(k2) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) + + // Write K3 at the end of the vlog. + txnSet(t, kv, k3, v3, 0) + require.NoError(t, kv.Close()) + + // The vlog should contain K0 and K3 (K1 and k2 was lost when Badger started up + // last due to checksum failure). 
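The test above relies on each value-log entry carrying a CRC32 checksum over its contents (badger uses the Castagnoli polynomial, see `CastagnoliCrcTable` further down in this diff), so a single flipped byte is caught on replay. A minimal illustration of that detection, independent of badger's actual entry framing:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

func main() {
	payload := []byte("key1=value1-012345678901234567890123")
	sum := crc32.Checksum(payload, castagnoli) // stored alongside the entry on disk

	// A reader recomputes the checksum and compares.
	fmt.Println("clean ok:", crc32.Checksum(payload, castagnoli) == sum) // true

	payload[len(payload)-1]++ // corrupt the last byte, like the test above
	fmt.Println("corrupt ok:", crc32.Checksum(payload, castagnoli) == sum) // false
}
```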
+ kv, err = Open(opts) + require.NoError(t, err) + + { + txn := kv.NewTransaction(false) + + iter := txn.NewIterator(DefaultIteratorOptions) + iter.Seek(k0) + require.True(t, iter.Valid()) + it := iter.Item() + require.Equal(t, it.Key(), k0) + require.Equal(t, getItemValue(t, it), v0) + iter.Next() + require.True(t, iter.Valid()) + it = iter.Item() + require.Equal(t, it.Key(), k3) + require.Equal(t, getItemValue(t, it), v3) + + iter.Close() + txn.Discard() + } + + require.NoError(t, kv.Close()) +} + +func TestPartialAppendToValueLog(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + // Create skeleton files. + opts := getTestOptions(dir) + opts.Truncate = true + opts.ValueLogFileSize = 100 * 1024 * 1024 // 100Mb + kv, err := Open(opts) + require.NoError(t, err) + require.NoError(t, kv.Close()) + + var ( + k0 = []byte("k0") + k1 = []byte("k1") + k2 = []byte("k2") + k3 = []byte("k3") + v0 = []byte("value0-01234567890123456789012012345678901234567890123") + v1 = []byte("value1-01234567890123456789012012345678901234567890123") + v2 = []byte("value2-01234567890123456789012012345678901234567890123") + v3 = []byte("value3-01234567890123456789012012345678901234567890123") + ) + // Values need to be long enough to actually get written to value log. + require.True(t, len(v3) >= kv.opt.ValueThreshold) + + // Create truncated vlog to simulate a partial append. + // k0 - single transaction, k1 and k2 in another transaction + buf := createVlog(t, []*Entry{ + {Key: k0, Value: v0}, + {Key: k1, Value: v1}, + {Key: k2, Value: v2}, + }) + buf = buf[:len(buf)-6] + require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777)) + + // Badger should now start up + kv, err = Open(opts) + require.NoError(t, err) + + require.NoError(t, kv.View(func(txn *Txn) error { + item, err := txn.Get(k0) + require.NoError(t, err) + require.Equal(t, v0, getItemValue(t, item)) + + _, err = txn.Get(k1) + require.Equal(t, ErrKeyNotFound, err) + _, err = txn.Get(k2) + require.Equal(t, ErrKeyNotFound, err) + return nil + })) + + // When K3 is set, it should be persisted after a restart. + txnSet(t, kv, k3, v3, 0) + require.NoError(t, kv.Close()) + kv, err = Open(opts) + require.NoError(t, err) + checkKeys(t, kv, [][]byte{k3}) + + // Replay value log from beginning, badger head is past k2. + require.NoError(t, kv.vlog.Close()) + require.NoError(t, + kv.vlog.open(kv, valuePointer{Fid: 0}, kv.replayFunction())) + require.NoError(t, kv.Close()) +} + +func TestReadOnlyOpenWithPartialAppendToValueLog(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + // Create skeleton files. + opts := getTestOptions(dir) + opts.ValueLogFileSize = 100 * 1024 * 1024 // 100Mb + kv, err := Open(opts) + require.NoError(t, err) + require.NoError(t, kv.Close()) + + var ( + k0 = []byte("k0") + k1 = []byte("k1") + k2 = []byte("k2") + v0 = []byte("value0-012345678901234567890123") + v1 = []byte("value1-012345678901234567890123") + v2 = []byte("value2-012345678901234567890123") + ) + + // Create truncated vlog to simulate a partial append. 
+ // k0 - single transaction, k1 and k2 in another transaction + buf := createVlog(t, []*Entry{ + {Key: k0, Value: v0}, + {Key: k1, Value: v1}, + {Key: k2, Value: v2}, + }) + buf = buf[:len(buf)-6] + require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777)) + + opts.ReadOnly = true + // Badger should fail a read-only open with values to replay + kv, err = Open(opts) + require.Error(t, err) + require.Regexp(t, "Database was not properly closed, cannot open read-only|Read-only mode is not supported on Windows", err.Error()) +} + +func TestValueLogTrigger(t *testing.T) { + t.Skip("Difficult to trigger compaction, so skipping. Re-enable after fixing #226") + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opt := getTestOptions(dir) + opt.ValueLogFileSize = 1 << 20 + kv, err := Open(opt) + require.NoError(t, err) + + // Write a lot of data, so it creates some work for valug log GC. + sz := 32 << 10 + txn := kv.NewTransaction(true) + for i := 0; i < 100; i++ { + v := make([]byte, sz) + rand.Read(v[:rand.Intn(sz)]) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v)) + if i%20 == 0 { + require.NoError(t, txn.Commit()) + txn = kv.NewTransaction(true) + } + } + require.NoError(t, txn.Commit()) + + for i := 0; i < 45; i++ { + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) + } + + require.NoError(t, kv.RunValueLogGC(0.5)) + + require.NoError(t, kv.Close()) + + err = kv.RunValueLogGC(0.5) + require.Equal(t, ErrRejected, err, "Error should be returned after closing DB.") +} + +func createVlog(t *testing.T, entries []*Entry) []byte { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + + opts := getTestOptions(dir) + opts.ValueLogFileSize = 100 * 1024 * 1024 // 100Mb + kv, err := Open(opts) + require.NoError(t, err) + txnSet(t, kv, entries[0].Key, entries[0].Value, entries[0].meta) + entries = entries[1:] + txn := kv.NewTransaction(true) + for _, entry := range entries { + require.NoError(t, txn.SetWithMeta(entry.Key, entry.Value, entry.meta)) + } + require.NoError(t, txn.Commit()) + require.NoError(t, kv.Close()) + + filename := vlogFilePath(dir, 0) + buf, err := ioutil.ReadFile(filename) + require.NoError(t, err) + return buf +} + +func TestPenultimateLogCorruption(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + require.NoError(t, err) + defer os.RemoveAll(dir) + opt := getTestOptions(dir) + opt.ValueLogLoadingMode = options.FileIO + // Each txn generates at least two entries. 3 txns will fit each file. + opt.ValueLogMaxEntries = 5 + + db0, err := Open(opt) + require.NoError(t, err) + + h := testHelper{db: db0, t: t} + h.writeRange(0, 7) + h.readRange(0, 7) + + for i := 2; i >= 0; i-- { + fpath := vlogFilePath(dir, uint32(i)) + fi, err := os.Stat(fpath) + require.NoError(t, err) + require.True(t, fi.Size() > 0, "Empty file at log=%d", i) + if i == 0 { + err := os.Truncate(fpath, fi.Size()-1) + require.NoError(t, err) + } + } + // Simulate a crash by not closing db0, but releasing the locks. + if db0.dirLockGuard != nil { + require.NoError(t, db0.dirLockGuard.release()) + } + if db0.valueDirGuard != nil { + require.NoError(t, db0.valueDirGuard.release()) + } + + opt.Truncate = true + db1, err := Open(opt) + require.NoError(t, err) + h.db = db1 + h.readRange(0, 1) // Only 2 should be gone, because it is at the end of logfile 0. + h.readRange(3, 7) + err = db1.View(func(txn *Txn) error { + _, err := txn.Get(h.key(2)) // Verify that 2 is gone. 
+ require.Equal(t, ErrKeyNotFound, err) + return nil + }) + require.NoError(t, err) + require.NoError(t, db1.Close()) +} + +func checkKeys(t *testing.T, kv *DB, keys [][]byte) { + i := 0 + txn := kv.NewTransaction(false) + iter := txn.NewIterator(IteratorOptions{}) + for iter.Seek(keys[0]); iter.Valid(); iter.Next() { + require.Equal(t, iter.Item().Key(), keys[i]) + i++ + } + require.Equal(t, i, len(keys)) +} + +type testHelper struct { + db *DB + t *testing.T + val []byte +} + +func (th *testHelper) key(i int) []byte { + return []byte(fmt.Sprintf("%010d", i)) +} +func (th *testHelper) value() []byte { + if len(th.val) > 0 { + return th.val + } + th.val = make([]byte, 100) + y.Check2(rand.Read(th.val)) + return th.val +} + +// writeRange [from, to]. +func (th *testHelper) writeRange(from, to int) { + for i := from; i <= to; i++ { + err := th.db.Update(func(txn *Txn) error { + return txn.Set(th.key(i), th.value()) + }) + require.NoError(th.t, err) + } +} + +func (th *testHelper) readRange(from, to int) { + for i := from; i <= to; i++ { + err := th.db.View(func(txn *Txn) error { + item, err := txn.Get(th.key(i)) + if err != nil { + return err + } + return item.Value(func(val []byte) error { + require.Equal(th.t, val, th.value(), "key=%q", th.key(i)) + return nil + + }) + }) + require.NoError(th.t, err, "key=%q", th.key(i)) + } +} + +// Test Bug #578, which showed that if a value is moved during value log GC, an +// older version can end up at a higher level in the LSM tree than a newer +// version, causing the data to not be returned. +func TestBug578(t *testing.T) { + dir, err := ioutil.TempDir("", "badger") + y.Check(err) + defer os.RemoveAll(dir) + + opts := DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + opts.ValueLogMaxEntries = 64 + opts.MaxTableSize = 1 << 13 + + db, err := Open(opts) + require.NoError(t, err) + + h := testHelper{db: db, t: t} + + // Let's run this whole thing a few times. + for j := 0; j < 10; j++ { + t.Logf("Cycle: %d\n", j) + h.writeRange(0, 32) + h.writeRange(0, 10) + h.writeRange(50, 72) + h.writeRange(40, 72) + h.writeRange(40, 72) + + // Run value log GC a few times. + for i := 0; i < 5; i++ { + db.RunValueLogGC(0.5) + } + h.readRange(0, 10) + } +} + +func BenchmarkReadWrite(b *testing.B) { + rwRatio := []float32{ + 0.1, 0.2, 0.5, 1.0, + } + valueSize := []int{ + 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, + } + + for _, vsz := range valueSize { + for _, rw := range rwRatio { + b.Run(fmt.Sprintf("%3.1f,%04d", rw, vsz), func(b *testing.B) { + dir, err := ioutil.TempDir("", "vlog-benchmark") + y.Check(err) + defer os.RemoveAll(dir) + + db, err := Open(getTestOptions(dir)) + y.Check(err) + + vl := &db.vlog + b.ResetTimer() + + for i := 0; i < b.N; i++ { + e := new(Entry) + e.Key = make([]byte, 16) + e.Value = make([]byte, vsz) + bl := new(request) + bl.Entries = []*Entry{e} + + var ptrs []valuePointer + + vl.write([]*request{bl}) + ptrs = append(ptrs, bl.Ptrs...) 
+ + f := rand.Float32() + if f < rw { + vl.write([]*request{bl}) + + } else { + ln := len(ptrs) + if ln == 0 { + b.Fatalf("Zero length of ptrs") + } + idx := rand.Intn(ln) + s := new(y.Slice) + buf, cb, err := vl.readValueBytes(ptrs[idx], s) + if err != nil { + b.Fatalf("Benchmark Read: %v", err) + } + + e := valueBytesToEntry(buf) + if len(e.Key) != 16 { + b.Fatalf("Key is invalid") + } + if len(e.Value) != vsz { + b.Fatalf("Value is invalid") + } + cb() + } + } + }) + } + } +} diff --git a/vendor/github.com/dgraph-io/badger/y/error.go b/vendor/github.com/dgraph-io/badger/y/error.go new file mode 100644 index 00000000..4f341cab --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/error.go @@ -0,0 +1,83 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +// This file contains some functions for error handling. Note that we are moving +// towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these +// functions are useful for simple checks logged on one machine. +// Some common use cases are: +// (1) You receive an error from external lib, and would like to check/log fatal. +// For this, use x.Check, x.Checkf. These will check for err != nil, which is +// more common in Go. If you want to check for boolean being true, use +// x.Assert, x.Assertf. +// (2) You receive an error from external lib, and would like to pass on with some +// stack trace information. In this case, use x.Wrap or x.Wrapf. +// (3) You want to generate a new error with stack trace info. Use x.Errorf. + +import ( + "fmt" + "log" + + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +var debugMode = true + +// Check logs fatal if err != nil. +func Check(err error) { + if err != nil { + log.Fatalf("%+v", Wrap(err)) + } +} + +// Check2 acts as convenience wrapper around Check, using the 2nd argument as error. +func Check2(_ interface{}, err error) { + Check(err) +} + +// AssertTrue asserts that b is true. Otherwise, it would log fatal. +func AssertTrue(b bool) { + if !b { + log.Fatalf("%+v", errors.Errorf("Assert failed")) + } +} + +// AssertTruef is AssertTrue with extra info. +func AssertTruef(b bool, format string, args ...interface{}) { + if !b { + log.Fatalf("%+v", errors.Errorf(format, args...)) + } +} + +// Wrap wraps errors from external lib. +func Wrap(err error) error { + if !debugMode { + return err + } + return errors.Wrap(err, "") +} + +// Wrapf is Wrap with extra info. +func Wrapf(err error, format string, args ...interface{}) error { + if !debugMode { + if err == nil { + return nil + } + return fmt.Errorf(format+" error: %+v", append(args, err)...) + } + return errors.Wrapf(err, format, args...) 
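The `y` error helpers above are meant for call sites where an error is either fatal (`Check`, `Check2`) or should be annotated and passed on (`Wrapf`). A small usage sketch with stdlib-only stand-ins for the helpers, declared locally rather than importing the vendored package:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// check mirrors y.Check: treat the error as fatal at this call site.
func check(err error) {
	if err != nil {
		log.Fatalf("%+v", err)
	}
}

// check2 mirrors y.Check2: drop the value, keep the error check.
func check2(_ interface{}, err error) { check(err) }

// wrapf mirrors the non-debug branch of y.Wrapf: annotate and pass on.
func wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf(format+": %v", append(args, err)...)
}

func openValueDir(dir string) (*os.File, error) {
	f, err := os.Open(dir)
	return f, wrapf(err, "opening value dir %q", dir)
}

func main() {
	f, err := openValueDir(os.TempDir())
	check(err)       // fatal if the directory cannot be opened
	check2(f.Stat()) // only the error half of Stat() matters here
	check(f.Close())
}
```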
+} diff --git a/vendor/github.com/dgraph-io/badger/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/y/file_dsync.go new file mode 100644 index 00000000..10b8c9ca --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/file_dsync.go @@ -0,0 +1,25 @@ +// +build !dragonfly,!freebsd,!windows + +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import "gx/ipfs/QmVGjyM9i2msKvLXwh9VosCTgP4mL91kC7hDmqnwTTx6Hu/sys/unix" + +func init() { + datasyncFileFlag = unix.O_DSYNC +} diff --git a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go new file mode 100644 index 00000000..b68be7ab --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go @@ -0,0 +1,25 @@ +// +build dragonfly freebsd windows + +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import "syscall" + +func init() { + datasyncFileFlag = syscall.O_SYNC +} diff --git a/vendor/github.com/dgraph-io/badger/y/iterator.go b/vendor/github.com/dgraph-io/badger/y/iterator.go new file mode 100644 index 00000000..c6eb9f0b --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/iterator.go @@ -0,0 +1,264 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "bytes" + "container/heap" + "encoding/binary" + + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// ValueStruct represents the value info that can be associated with a key, but also the internal +// Meta field. +type ValueStruct struct { + Meta byte + UserMeta byte + ExpiresAt uint64 + Value []byte + + Version uint64 // This field is not serialized. Only for internal usage. 
+} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodedSize is the size of the ValueStruct when encoded +func (v *ValueStruct) EncodedSize() uint16 { + sz := len(v.Value) + 2 // meta, usermeta. + if v.ExpiresAt == 0 { + return uint16(sz + 1) + } + + enc := sizeVarint(v.ExpiresAt) + return uint16(sz + enc) +} + +// Decode uses the length of the slice to infer the length of the Value field. +func (v *ValueStruct) Decode(b []byte) { + v.Meta = b[0] + v.UserMeta = b[1] + var sz int + v.ExpiresAt, sz = binary.Uvarint(b[2:]) + v.Value = b[2+sz:] +} + +// Encode expects a slice of length at least v.EncodedSize(). +func (v *ValueStruct) Encode(b []byte) { + b[0] = v.Meta + b[1] = v.UserMeta + sz := binary.PutUvarint(b[2:], v.ExpiresAt) + copy(b[2+sz:], v.Value) +} + +// EncodeTo should be kept in sync with the Encode function above. The reason +// this function exists is to avoid creating byte arrays per key-value pair in +// table/builder.go. +func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) { + buf.WriteByte(v.Meta) + buf.WriteByte(v.UserMeta) + var enc [binary.MaxVarintLen64]byte + sz := binary.PutUvarint(enc[:], v.ExpiresAt) + buf.Write(enc[:sz]) + buf.Write(v.Value) +} + +// Iterator is an interface for a basic iterator. +type Iterator interface { + Next() + Rewind() + Seek(key []byte) + Key() []byte + Value() ValueStruct + Valid() bool + + // All iterators should be closed so that file garbage collection works. + Close() error +} + +type elem struct { + itr Iterator + nice int + reversed bool +} + +type elemHeap []*elem + +func (eh elemHeap) Len() int { return len(eh) } +func (eh elemHeap) Swap(i, j int) { eh[i], eh[j] = eh[j], eh[i] } +func (eh *elemHeap) Push(x interface{}) { *eh = append(*eh, x.(*elem)) } +func (eh *elemHeap) Pop() interface{} { + // Remove the last element, because Go has already swapped 0th elem <-> last. + old := *eh + n := len(old) + x := old[n-1] + *eh = old[0 : n-1] + return x +} +func (eh elemHeap) Less(i, j int) bool { + cmp := CompareKeys(eh[i].itr.Key(), eh[j].itr.Key()) + if cmp < 0 { + return !eh[i].reversed + } + if cmp > 0 { + return eh[i].reversed + } + // The keys are equal. In this case, lower nice take precedence. This is important. + return eh[i].nice < eh[j].nice +} + +// MergeIterator merges multiple iterators. +// NOTE: MergeIterator owns the array of iterators and is responsible for closing them. +type MergeIterator struct { + h elemHeap + curKey []byte + reversed bool + + all []Iterator +} + +// NewMergeIterator returns a new MergeIterator from a list of Iterators. +func NewMergeIterator(iters []Iterator, reversed bool) *MergeIterator { + m := &MergeIterator{all: iters, reversed: reversed} + m.h = make(elemHeap, 0, len(iters)) + m.initHeap() + return m +} + +func (s *MergeIterator) storeKey(smallest Iterator) { + if cap(s.curKey) < len(smallest.Key()) { + s.curKey = make([]byte, 2*len(smallest.Key())) + } + s.curKey = s.curKey[:len(smallest.Key())] + copy(s.curKey, smallest.Key()) +} + +// initHeap checks all iterators and initializes our heap and array of keys. +// Whenever we reverse direction, we need to run this. 
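To make the `ValueStruct` wire layout above concrete: it is `[Meta][UserMeta][uvarint(ExpiresAt)][Value]`, with the value length implied by the slice length rather than stored. A standalone round-trip using that same layout, with local copies of the types rather than the vendored package:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type valueStruct struct {
	Meta      byte
	UserMeta  byte
	ExpiresAt uint64
	Value     []byte
}

func encode(v valueStruct) []byte {
	buf := make([]byte, 2+binary.MaxVarintLen64+len(v.Value))
	buf[0] = v.Meta
	buf[1] = v.UserMeta
	n := 2 + binary.PutUvarint(buf[2:], v.ExpiresAt)
	n += copy(buf[n:], v.Value)
	return buf[:n]
}

func decode(b []byte) valueStruct {
	var v valueStruct
	v.Meta = b[0]
	v.UserMeta = b[1]
	expiresAt, sz := binary.Uvarint(b[2:])
	v.ExpiresAt = expiresAt
	v.Value = b[2+sz:] // remaining bytes are the value; no explicit length is stored
	return v
}

func main() {
	in := valueStruct{Meta: 0x02, UserMeta: 0x01, ExpiresAt: 1557000000, Value: []byte("hello")}
	out := decode(encode(in))
	fmt.Printf("%+v\n", out) // round-trips: same meta bytes, expiry and value
}
```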
+func (s *MergeIterator) initHeap() { + s.h = s.h[:0] + for idx, itr := range s.all { + if !itr.Valid() { + continue + } + e := &elem{itr: itr, nice: idx, reversed: s.reversed} + s.h = append(s.h, e) + } + heap.Init(&s.h) + for len(s.h) > 0 { + it := s.h[0].itr + if it == nil || !it.Valid() { + heap.Pop(&s.h) + continue + } + s.storeKey(s.h[0].itr) + break + } +} + +// Valid returns whether the MergeIterator is at a valid element. +func (s *MergeIterator) Valid() bool { + if s == nil { + return false + } + if len(s.h) == 0 { + return false + } + return s.h[0].itr.Valid() +} + +// Key returns the key associated with the current iterator +func (s *MergeIterator) Key() []byte { + if len(s.h) == 0 { + return nil + } + return s.h[0].itr.Key() +} + +// Value returns the value associated with the iterator. +func (s *MergeIterator) Value() ValueStruct { + if len(s.h) == 0 { + return ValueStruct{} + } + return s.h[0].itr.Value() +} + +// Next returns the next element. If it is the same as the current key, ignore it. +func (s *MergeIterator) Next() { + if len(s.h) == 0 { + return + } + + smallest := s.h[0].itr + smallest.Next() + + for len(s.h) > 0 { + smallest = s.h[0].itr + if !smallest.Valid() { + heap.Pop(&s.h) + continue + } + + heap.Fix(&s.h, 0) + smallest = s.h[0].itr + if smallest.Valid() { + if !bytes.Equal(smallest.Key(), s.curKey) { + break + } + smallest.Next() + } + } + if !smallest.Valid() { + return + } + s.storeKey(smallest) +} + +// Rewind seeks to first element (or last element for reverse iterator). +func (s *MergeIterator) Rewind() { + for _, itr := range s.all { + itr.Rewind() + } + s.initHeap() +} + +// Seek brings us to element with key >= given key. +func (s *MergeIterator) Seek(key []byte) { + for _, itr := range s.all { + itr.Seek(key) + } + s.initHeap() +} + +// Close implements y.Iterator +func (s *MergeIterator) Close() error { + for _, itr := range s.all { + if err := itr.Close(); err != nil { + return errors.Wrap(err, "MergeIterator") + } + } + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/y/iterator_test.go b/vendor/github.com/dgraph-io/badger/y/iterator_test.go new file mode 100644 index 00000000..cff88be0 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/iterator_test.go @@ -0,0 +1,234 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package y + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/require" +) + +type SimpleIterator struct { + keys [][]byte + vals [][]byte + idx int + reversed bool +} + +var ( + closeCount int +) + +func (s *SimpleIterator) Close() error { closeCount++; return nil } + +func (s *SimpleIterator) Next() { + if !s.reversed { + s.idx++ + } else { + s.idx-- + } +} + +func (s *SimpleIterator) Rewind() { + if !s.reversed { + s.idx = 0 + } else { + s.idx = len(s.keys) - 1 + } +} + +func (s *SimpleIterator) Seek(key []byte) { + key = KeyWithTs(key, 0) + if !s.reversed { + s.idx = sort.Search(len(s.keys), func(i int) bool { + return CompareKeys(s.keys[i], key) >= 0 + }) + } else { + n := len(s.keys) + s.idx = n - 1 - sort.Search(n, func(i int) bool { + return CompareKeys(s.keys[n-1-i], key) <= 0 + }) + } +} + +func (s *SimpleIterator) Key() []byte { return s.keys[s.idx] } +func (s *SimpleIterator) Value() ValueStruct { + return ValueStruct{ + Value: s.vals[s.idx], + UserMeta: 55, + Meta: 0, + } +} +func (s *SimpleIterator) Valid() bool { + return s.idx >= 0 && s.idx < len(s.keys) +} + +func newSimpleIterator(keys []string, vals []string, reversed bool) *SimpleIterator { + k := make([][]byte, len(keys)) + v := make([][]byte, len(vals)) + AssertTrue(len(keys) == len(vals)) + for i := 0; i < len(keys); i++ { + k[i] = KeyWithTs([]byte(keys[i]), 0) + v[i] = []byte(vals[i]) + } + return &SimpleIterator{ + keys: k, + vals: v, + idx: -1, + reversed: reversed, + } +} + +func getAll(it Iterator) ([]string, []string) { + var keys, vals []string + for ; it.Valid(); it.Next() { + k := it.Key() + keys = append(keys, string(ParseKey(k))) + v := it.Value() + vals = append(vals, string(v.Value)) + } + return keys, vals +} + +func closeAndCheck(t *testing.T, it Iterator, expected int) { + closeCount = 0 + it.Close() + require.EqualValues(t, expected, closeCount) +} + +func TestSimpleIterator(t *testing.T) { + keys := []string{"1", "2", "3"} + vals := []string{"v1", "v2", "v3"} + it := newSimpleIterator(keys, vals, false) + it.Rewind() + k, v := getAll(it) + require.EqualValues(t, keys, k) + require.EqualValues(t, vals, v) + + closeAndCheck(t, it, 1) +} + +func reversed(a []string) []string { + var out []string + for i := len(a) - 1; i >= 0; i-- { + out = append(out, a[i]) + } + return out +} + +func TestMergeSingle(t *testing.T) { + keys := []string{"1", "2", "3"} + vals := []string{"v1", "v2", "v3"} + it := newSimpleIterator(keys, vals, false) + mergeIt := NewMergeIterator([]Iterator{it}, false) + mergeIt.Rewind() + k, v := getAll(mergeIt) + require.EqualValues(t, keys, k) + require.EqualValues(t, vals, v) + closeAndCheck(t, mergeIt, 1) +} + +func TestMergeSingleReversed(t *testing.T) { + keys := []string{"1", "2", "3"} + vals := []string{"v1", "v2", "v3"} + it := newSimpleIterator(keys, vals, true) + mergeIt := NewMergeIterator([]Iterator{it}, true) + mergeIt.Rewind() + k, v := getAll(mergeIt) + require.EqualValues(t, reversed(keys), k) + require.EqualValues(t, reversed(vals), v) + closeAndCheck(t, mergeIt, 1) +} + +func TestMergeMore(t *testing.T) { + it := newSimpleIterator([]string{"1", "3", "7"}, []string{"a1", "a3", "a7"}, false) + it2 := newSimpleIterator([]string{"2", "3", "5"}, []string{"b2", "b3", "b5"}, false) + it3 := newSimpleIterator([]string{"1"}, []string{"c1"}, false) + it4 := newSimpleIterator([]string{"1", "7", "9"}, []string{"d1", "d7", "d9"}, false) + + mergeIt := NewMergeIterator([]Iterator{it, it2, it3, it4}, false) + expectedKeys := []string{"1", "2", "3", "5", "7", "9"} 
+ expectedVals := []string{"a1", "b2", "a3", "b5", "a7", "d9"} + mergeIt.Rewind() + k, v := getAll(mergeIt) + require.EqualValues(t, expectedKeys, k) + require.EqualValues(t, expectedVals, v) + closeAndCheck(t, mergeIt, 4) +} + +// Ensure MergeIterator satisfies the Iterator interface +func TestMergeIteratorNested(t *testing.T) { + keys := []string{"1", "2", "3"} + vals := []string{"v1", "v2", "v3"} + it := newSimpleIterator(keys, vals, false) + mergeIt := NewMergeIterator([]Iterator{it}, false) + mergeIt2 := NewMergeIterator([]Iterator{mergeIt}, false) + mergeIt2.Rewind() + k, v := getAll(mergeIt2) + require.EqualValues(t, keys, k) + require.EqualValues(t, vals, v) + closeAndCheck(t, mergeIt2, 1) +} + +func TestMergeIteratorSeek(t *testing.T) { + it := newSimpleIterator([]string{"1", "3", "7"}, []string{"a1", "a3", "a7"}, false) + it2 := newSimpleIterator([]string{"2", "3", "5"}, []string{"b2", "b3", "b5"}, false) + it3 := newSimpleIterator([]string{"1"}, []string{"c1"}, false) + it4 := newSimpleIterator([]string{"1", "7", "9"}, []string{"d1", "d7", "d9"}, false) + mergeIt := NewMergeIterator([]Iterator{it, it2, it3, it4}, false) + mergeIt.Seek([]byte("4")) + k, v := getAll(mergeIt) + require.EqualValues(t, []string{"5", "7", "9"}, k) + require.EqualValues(t, []string{"b5", "a7", "d9"}, v) + closeAndCheck(t, mergeIt, 4) +} + +func TestMergeIteratorSeekReversed(t *testing.T) { + it := newSimpleIterator([]string{"1", "3", "7"}, []string{"a1", "a3", "a7"}, true) + it2 := newSimpleIterator([]string{"2", "3", "5"}, []string{"b2", "b3", "b5"}, true) + it3 := newSimpleIterator([]string{"1"}, []string{"c1"}, true) + it4 := newSimpleIterator([]string{"1", "7", "9"}, []string{"d1", "d7", "d9"}, true) + mergeIt := NewMergeIterator([]Iterator{it, it2, it3, it4}, true) + mergeIt.Seek([]byte("5")) + k, v := getAll(mergeIt) + require.EqualValues(t, []string{"5", "3", "2", "1"}, k) + require.EqualValues(t, []string{"b5", "a3", "b2", "a1"}, v) + closeAndCheck(t, mergeIt, 4) +} + +func TestMergeIteratorSeekInvalid(t *testing.T) { + it := newSimpleIterator([]string{"1", "3", "7"}, []string{"a1", "a3", "a7"}, false) + it2 := newSimpleIterator([]string{"2", "3", "5"}, []string{"b2", "b3", "b5"}, false) + it3 := newSimpleIterator([]string{"1"}, []string{"c1"}, false) + it4 := newSimpleIterator([]string{"1", "7", "9"}, []string{"d1", "d7", "d9"}, false) + mergeIt := NewMergeIterator([]Iterator{it, it2, it3, it4}, false) + mergeIt.Seek([]byte("f")) + require.False(t, mergeIt.Valid()) + closeAndCheck(t, mergeIt, 4) +} + +func TestMergeIteratorSeekInvalidReversed(t *testing.T) { + it := newSimpleIterator([]string{"1", "3", "7"}, []string{"a1", "a3", "a7"}, true) + it2 := newSimpleIterator([]string{"2", "3", "5"}, []string{"b2", "b3", "b5"}, true) + it3 := newSimpleIterator([]string{"1"}, []string{"c1"}, true) + it4 := newSimpleIterator([]string{"1", "7", "9"}, []string{"d1", "d7", "d9"}, true) + mergeIt := NewMergeIterator([]Iterator{it, it2, it3, it4}, true) + mergeIt.Seek([]byte("0")) + require.False(t, mergeIt.Valid()) + closeAndCheck(t, mergeIt, 4) +} diff --git a/vendor/github.com/dgraph-io/badger/y/metrics.go b/vendor/github.com/dgraph-io/badger/y/metrics.go new file mode 100644 index 00000000..2de17d10 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/metrics.go @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import "expvar" + +var ( + // LSMSize has size of the LSM in bytes + LSMSize *expvar.Map + // VlogSize has size of the value log in bytes + VlogSize *expvar.Map + // PendingWrites tracks the number of pending writes. + PendingWrites *expvar.Map + + // These are cumulative + + // NumReads has cumulative number of reads + NumReads *expvar.Int + // NumWrites has cumulative number of writes + NumWrites *expvar.Int + // NumBytesRead has cumulative number of bytes read + NumBytesRead *expvar.Int + // NumBytesWritten has cumulative number of bytes written + NumBytesWritten *expvar.Int + // NumLSMGets is number of LMS gets + NumLSMGets *expvar.Map + // NumLSMBloomHits is number of LMS bloom hits + NumLSMBloomHits *expvar.Map + // NumGets is number of gets + NumGets *expvar.Int + // NumPuts is number of puts + NumPuts *expvar.Int + // NumBlockedPuts is number of blocked puts + NumBlockedPuts *expvar.Int + // NumMemtableGets is number of memtable gets + NumMemtableGets *expvar.Int +) + +// These variables are global and have cumulative values for all kv stores. +func init() { + NumReads = expvar.NewInt("badger_disk_reads_total") + NumWrites = expvar.NewInt("badger_disk_writes_total") + NumBytesRead = expvar.NewInt("badger_read_bytes") + NumBytesWritten = expvar.NewInt("badger_written_bytes") + NumLSMGets = expvar.NewMap("badger_lsm_level_gets_total") + NumLSMBloomHits = expvar.NewMap("badger_lsm_bloom_hits_total") + NumGets = expvar.NewInt("badger_gets_total") + NumPuts = expvar.NewInt("badger_puts_total") + NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total") + NumMemtableGets = expvar.NewInt("badger_memtable_gets_total") + LSMSize = expvar.NewMap("badger_lsm_size_bytes") + VlogSize = expvar.NewMap("badger_vlog_size_bytes") + PendingWrites = expvar.NewMap("badger_pending_writes_total") +} diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go new file mode 100644 index 00000000..0d6d70ea --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go @@ -0,0 +1,63 @@ +// +build !windows + +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "os" + "syscall" + "unsafe" + + "gx/ipfs/QmVGjyM9i2msKvLXwh9VosCTgP4mL91kC7hDmqnwTTx6Hu/sys/unix" +) + +// Mmap uses the mmap system call to memory-map a file. If writable is true, +// memory protection of the pages is set so that they may be written to as well. 
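`Mmap`, `Munmap` and `Madvise` below are thin wrappers over the corresponding syscalls; the value log uses them so reads can be served as slices of the mapped file. Here is a minimal read-only mapping built directly on `golang.org/x/sys/unix`, the same family of calls the vendored wrapper makes; the temp-file name is arbitrary.

```go
//go:build !windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Create a small file to map.
	f, err := os.CreateTemp("", "mmap-demo-*.vlog")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("hello from the value log"); err != nil {
		panic(err)
	}
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// PROT_READ only; a writable log would add PROT_WRITE, as in the code above.
	data, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(data)

	// Random access pattern: advise the kernel not to read ahead.
	_ = unix.Madvise(data, unix.MADV_RANDOM)

	fmt.Printf("%s\n", data) // reads come straight from the mapping
	_ = f.Close()
}
```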
+func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) { + mtype := unix.PROT_READ + if writable { + mtype |= unix.PROT_WRITE + } + return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) +} + +// Munmap unmaps a previously mapped slice. +func Munmap(b []byte) error { + return unix.Munmap(b) +} + +// Madvise uses the madvise system call to give advise about the use of memory +// when using a slice that is memory-mapped to a file. Set the readahead flag to +// false if page references are expected in random order. +func Madvise(b []byte, readahead bool) error { + flags := unix.MADV_NORMAL + if !readahead { + flags = unix.MADV_RANDOM + } + return madvise(b, flags) +} + +// This is required because the unix package does not support the madvise system call on OS X. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), + uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go new file mode 100644 index 00000000..0efb2d0f --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go @@ -0,0 +1,90 @@ +// +build windows + +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +func Mmap(fd *os.File, write bool, size int64) ([]byte, error) { + protect := syscall.PAGE_READONLY + access := syscall.FILE_MAP_READ + + if write { + protect = syscall.PAGE_READWRITE + access = syscall.FILE_MAP_WRITE + } + fi, err := fd.Stat() + if err != nil { + return nil, err + } + + // Truncate the database to the size of the mmap. + if fi.Size() < size { + if err := fd.Truncate(size); err != nil { + return nil, fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(size >> 32) + sizehi := uint32(size) & 0xffffffff + + handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil, + uint32(protect), sizelo, sizehi, nil) + if err != nil { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + + // Create the memory map. + addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size)) + if addr == 0 { + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil { + return nil, os.NewSyscallError("CloseHandle", err) + } + + // Slice memory layout + // Copied this snippet from golang/sys package + var sl = struct { + addr uintptr + len int + cap int + }{addr, int(size), int(size)} + + // Use unsafe to turn sl into a []byte. + data := *(*[]byte)(unsafe.Pointer(&sl)) + + return data, nil +} + +func Munmap(b []byte) error { + return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0]))) +} + +func Madvise(b []byte, readahead bool) error { + // Do Nothing. 
We don’t care about this setting on Windows + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/y/watermark.go b/vendor/github.com/dgraph-io/badger/y/watermark.go new file mode 100644 index 00000000..6d17c3cb --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/watermark.go @@ -0,0 +1,233 @@ +/* + * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "container/heap" + "context" + "sync/atomic" + + "gx/ipfs/QmRvYNctevGUW52urgmoFZscT6buMKqhHezLUS64WepGWn/go-net/trace" +) + +type uint64Heap []uint64 + +func (u uint64Heap) Len() int { return len(u) } +func (u uint64Heap) Less(i int, j int) bool { return u[i] < u[j] } +func (u uint64Heap) Swap(i int, j int) { u[i], u[j] = u[j], u[i] } +func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) } +func (u *uint64Heap) Pop() interface{} { + old := *u + n := len(old) + x := old[n-1] + *u = old[0 : n-1] + return x +} + +// mark contains one of more indices, along with a done boolean to indicate the +// status of the index: begin or done. It also contains waiters, who could be +// waiting for the watermark to reach >= a certain index. +type mark struct { + // Either this is an (index, waiter) pair or (index, done) or (indices, done). + index uint64 + waiter chan struct{} + indices []uint64 + done bool // Set to true if the index is done. +} + +// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes +// finished or "done" according to a WaterMark once Done(k) has been called +// 1. as many times as Begin(k) has, AND +// 2. a positive number of times. +// +// An index may also become "done" by calling SetDoneUntil at a time such that it is not +// inter-mingled with Begin/Done calls. +// +// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they +// are 64-bit aligned by putting them at the beginning of the structure. +type WaterMark struct { + doneUntil uint64 + lastIndex uint64 + Name string + markCh chan mark + elog trace.EventLog +} + +// Init initializes a WaterMark struct. MUST be called before using it. +func (w *WaterMark) Init(closer *Closer) { + w.markCh = make(chan mark, 100) + w.elog = trace.NewEventLog("Watermark", w.Name) + go w.process(closer) +} + +// Begin sets the last index to the given value. +func (w *WaterMark) Begin(index uint64) { + atomic.StoreUint64(&w.lastIndex, index) + w.markCh <- mark{index: index, done: false} +} + +// BeginMany works like Begin but accepts multiple indices. +func (w *WaterMark) BeginMany(indices []uint64) { + atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1]) + w.markCh <- mark{index: 0, indices: indices, done: false} +} + +// Done sets a single index as done. +func (w *WaterMark) Done(index uint64) { + w.markCh <- mark{index: index, done: true} +} + +// DoneMany works like Done but accepts multiple indices. 
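The WaterMark contract described above (DoneUntil only advances past an index once its Begin calls are matched by Done calls, in index order) can be modelled with a pending-count map plus a min-heap, which is essentially the bookkeeping `process` maintains below. A single-goroutine sketch, simplified with no channels or waiters:

```go
package main

import (
	"container/heap"
	"fmt"
)

type uint64Heap []uint64

func (u uint64Heap) Len() int            { return len(u) }
func (u uint64Heap) Less(i, j int) bool  { return u[i] < u[j] }
func (u uint64Heap) Swap(i, j int)       { u[i], u[j] = u[j], u[i] }
func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) }
func (u *uint64Heap) Pop() interface{} {
	old := *u
	x := old[len(old)-1]
	*u = old[:len(old)-1]
	return x
}

// watermark is a single-goroutine model of process(): begin/done counts per
// index, and doneUntil advancing over the fully-done prefix of indices.
type watermark struct {
	pending   map[uint64]int
	indices   uint64Heap
	doneUntil uint64
}

func newWatermark() *watermark {
	return &watermark{pending: map[uint64]int{}}
}

func (w *watermark) mark(index uint64, done bool) {
	if _, ok := w.pending[index]; !ok {
		heap.Push(&w.indices, index)
	}
	if done {
		w.pending[index]--
	} else {
		w.pending[index]++
	}
	// Pop every leading index whose begin/done counts have balanced out.
	for len(w.indices) > 0 && w.pending[w.indices[0]] <= 0 {
		min := heap.Pop(&w.indices).(uint64)
		delete(w.pending, min)
		w.doneUntil = min
	}
}

func (w *watermark) Begin(i uint64) { w.mark(i, false) }
func (w *watermark) Done(i uint64)  { w.mark(i, true) }

func main() {
	w := newWatermark()
	w.Begin(1)
	w.Begin(2)
	w.Done(2)                // 2 is finished, but 1 is still pending
	fmt.Println(w.doneUntil) // 0: cannot advance past the unfinished index 1
	w.Done(1)
	fmt.Println(w.doneUntil) // 2: both 1 and 2 are now done
}
```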
+func (w *WaterMark) DoneMany(indices []uint64) { + w.markCh <- mark{index: 0, indices: indices, done: true} +} + +// DoneUntil returns the maximum index that has the property that all indices +// less than or equal to it are done. +func (w *WaterMark) DoneUntil() uint64 { + return atomic.LoadUint64(&w.doneUntil) +} + +// SetDoneUntil sets the maximum index that has the property that all indices +// less than or equal to it are done. +func (w *WaterMark) SetDoneUntil(val uint64) { + atomic.StoreUint64(&w.doneUntil, val) +} + +// LastIndex returns the last index for which Begin has been called. +func (w *WaterMark) LastIndex() uint64 { + return atomic.LoadUint64(&w.lastIndex) +} + +// WaitForMark waits until the given index is marked as done. +func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error { + if w.DoneUntil() >= index { + return nil + } + waitCh := make(chan struct{}) + w.markCh <- mark{index: index, waiter: waitCh} + + select { + case <-ctx.Done(): + return ctx.Err() + case <-waitCh: + return nil + } +} + +// process is used to process the Mark channel. This is not thread-safe, +// so only run one goroutine for process. One is sufficient, because +// all goroutine ops use purely memory and cpu. +// Each index has to emit atleast one begin watermark in serial order otherwise waiters +// can get blocked idefinitely. Example: We had an watermark at 100 and a waiter at 101, +// if no watermark is emitted at index 101 then waiter would get stuck indefinitely as it +// can't decide whether the task at 101 has decided not to emit watermark or it didn't get +// scheduled yet. +func (w *WaterMark) process(closer *Closer) { + defer closer.Done() + + var indices uint64Heap + // pending maps raft proposal index to the number of pending mutations for this proposal. + pending := make(map[uint64]int) + waiters := make(map[uint64][]chan struct{}) + + heap.Init(&indices) + var loop uint64 + + processOne := func(index uint64, done bool) { + // If not already done, then set. Otherwise, don't undo a done entry. + prev, present := pending[index] + if !present { + heap.Push(&indices, index) + } + + delta := 1 + if done { + delta = -1 + } + pending[index] = prev + delta + + loop++ + if len(indices) > 0 && loop%10000 == 0 { + min := indices[0] + w.elog.Printf("WaterMark %s: Done entry %4d. Size: %4d Watermark: %-4d Looking for: %-4d. Value: %d\n", + w.Name, index, len(indices), w.DoneUntil(), min, pending[min]) + } + + // Update mark by going through all indices in order; and checking if they have + // been done. Stop at the first index, which isn't done. + doneUntil := w.DoneUntil() + if doneUntil > index { + AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index) + } + + until := doneUntil + loops := 0 + + for len(indices) > 0 { + min := indices[0] + if done := pending[min]; done > 0 { + break // len(indices) will be > 0. + } + // Even if done is called multiple times causing it to become + // negative, we should still pop the index. + heap.Pop(&indices) + delete(pending, min) + until = min + loops++ + } + for i := doneUntil + 1; i <= until; i++ { + toNotify := waiters[i] + for _, ch := range toNotify { + close(ch) + } + delete(waiters, i) // Release the memory back. + } + if until != doneUntil { + AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until)) + w.elog.Printf("%s: Done until %d. 
Loops: %d\n", w.Name, until, loops) + } + } + + for { + select { + case <-closer.HasBeenClosed(): + return + case mark := <-w.markCh: + if mark.waiter != nil { + doneUntil := atomic.LoadUint64(&w.doneUntil) + if doneUntil >= mark.index { + close(mark.waiter) + } else { + ws, ok := waiters[mark.index] + if !ok { + waiters[mark.index] = []chan struct{}{mark.waiter} + } else { + waiters[mark.index] = append(ws, mark.waiter) + } + } + } else { + if mark.index > 0 { + processOne(mark.index, mark.done) + } + for _, index := range mark.indices { + processOne(index, mark.done) + } + } + } + } +} diff --git a/vendor/github.com/dgraph-io/badger/y/y.go b/vendor/github.com/dgraph-io/badger/y/y.go new file mode 100644 index 00000000..47cb2922 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/y/y.go @@ -0,0 +1,286 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "math" + "os" + "sync" + "time" + + "gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors" +) + +// ErrEOF indicates an end of file when trying to read from a memory mapped file +// and encountering the end of slice. +var ErrEOF = errors.New("End of mapped region") + +const ( + // Sync indicates that O_DSYNC should be set on the underlying file, + // ensuring that data writes do not return until the data is flushed + // to disk. + Sync = 1 << iota + // ReadOnly opens the underlying file on a read-only basis. + ReadOnly +) + +var ( + // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go + datasyncFileFlag = 0x0 + + // CastagnoliCrcTable is a CRC32 polynomial table + CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli) +) + +// OpenExistingFile opens an existing file, errors if it doesn't exist. +func OpenExistingFile(filename string, flags uint32) (*os.File, error) { + openFlags := os.O_RDWR + if flags&ReadOnly != 0 { + openFlags = os.O_RDONLY + } + + if flags&Sync != 0 { + openFlags |= datasyncFileFlag + } + return os.OpenFile(filename, openFlags, 0) +} + +// CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed. +func CreateSyncedFile(filename string, sync bool) (*os.File, error) { + flags := os.O_RDWR | os.O_CREATE | os.O_EXCL + if sync { + flags |= datasyncFileFlag + } + return os.OpenFile(filename, flags, 0666) +} + +// OpenSyncedFile creates the file if one doesn't exist. +func OpenSyncedFile(filename string, sync bool) (*os.File, error) { + flags := os.O_RDWR | os.O_CREATE + if sync { + flags |= datasyncFileFlag + } + return os.OpenFile(filename, flags, 0666) +} + +// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC +func OpenTruncFile(filename string, sync bool) (*os.File, error) { + flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC + if sync { + flags |= datasyncFileFlag + } + return os.OpenFile(filename, flags, 0666) +} + +// SafeCopy does append(a[:0], src...). 
+func SafeCopy(a []byte, src []byte) []byte { + return append(a[:0], src...) +} + +// Copy copies a byte slice and returns the copied slice. +func Copy(a []byte) []byte { + b := make([]byte, len(a)) + copy(b, a) + return b +} + +// KeyWithTs generates a new key by appending ts to key. +func KeyWithTs(key []byte, ts uint64) []byte { + out := make([]byte, len(key)+8) + copy(out, key) + binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts) + return out +} + +// ParseTs parses the timestamp from the key bytes. +func ParseTs(key []byte) uint64 { + if len(key) <= 8 { + return 0 + } + return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:]) +} + +// CompareKeys checks the key without timestamp and checks the timestamp if keyNoTs +// is same. +// a would be sorted higher than aa if we use bytes.compare +// All keys should have timestamp. +func CompareKeys(key1 []byte, key2 []byte) int { + AssertTrue(len(key1) > 8 && len(key2) > 8) + if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 { + return cmp + } + return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:]) +} + +// ParseKey parses the actual key from the key bytes. +func ParseKey(key []byte) []byte { + if key == nil { + return nil + } + + AssertTrue(len(key) > 8) + return key[:len(key)-8] +} + +// SameKey checks for key equality ignoring the version timestamp suffix. +func SameKey(src, dst []byte) bool { + if len(src) != len(dst) { + return false + } + return bytes.Equal(ParseKey(src), ParseKey(dst)) +} + +// Slice holds a reusable buf, will reallocate if you request a larger size than ever before. +// One problem is with n distinct sizes in random order it'll reallocate log(n) times. +type Slice struct { + buf []byte +} + +// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of +// length sz. +func (s *Slice) Resize(sz int) []byte { + if cap(s.buf) < sz { + s.buf = make([]byte, sz) + } + return s.buf[0:sz] +} + +// FixedDuration returns a string representation of the given duration with the +// hours, minutes, and seconds. +func FixedDuration(d time.Duration) string { + str := fmt.Sprintf("%02ds", int(d.Seconds())%60) + if d >= time.Minute { + str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str + } + if d >= time.Hour { + str = fmt.Sprintf("%02dh", int(d.Hours())) + str + } + return str +} + +// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan +// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting +// down. +type Closer struct { + closed chan struct{} + waiting sync.WaitGroup +} + +// NewCloser constructs a new Closer, with an initial count on the WaitGroup. +func NewCloser(initial int) *Closer { + ret := &Closer{closed: make(chan struct{})} + ret.waiting.Add(initial) + return ret +} + +// AddRunning Add()'s delta to the WaitGroup. +func (lc *Closer) AddRunning(delta int) { + lc.waiting.Add(delta) +} + +// Signal signals the HasBeenClosed signal. +func (lc *Closer) Signal() { + close(lc.closed) +} + +// HasBeenClosed gets signaled when Signal() is called. +func (lc *Closer) HasBeenClosed() <-chan struct{} { + return lc.closed +} + +// Done calls Done() on the WaitGroup. +func (lc *Closer) Done() { + lc.waiting.Done() +} + +// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done +// calls to balance out.) +func (lc *Closer) Wait() { + lc.waiting.Wait() +} + +// SignalAndWait calls Signal(), then Wait(). 
+func (lc *Closer) SignalAndWait() { + lc.Signal() + lc.Wait() +} + +// Throttle allows a limited number of workers to run at a time. It also +// provides a mechanism to check for errors encountered by workers and wait for +// them to finish. +type Throttle struct { + wg sync.WaitGroup + ch chan struct{} + errCh chan error +} + +// NewThrottle creates a new throttle with a max number of workers. +func NewThrottle(max int) *Throttle { + return &Throttle{ + ch: make(chan struct{}, max), + errCh: make(chan error, max), + } +} + +// Do should be called by workers before they start working. It blocks if there +// are already maximum number of workers working. If it detects an error from +// previously Done workers, it would return it. +func (t *Throttle) Do() error { + for { + select { + case t.ch <- struct{}{}: + t.wg.Add(1) + return nil + case err := <-t.errCh: + if err != nil { + return err + } + } + } +} + +// Done should be called by workers when they finish working. They can also +// pass the error status of work done. +func (t *Throttle) Done(err error) { + if err != nil { + t.errCh <- err + } + select { + case <-t.ch: + default: + panic("Throttle Do Done mismatch") + } + t.wg.Done() +} + +// Finish waits until all workers have finished working. It would return any +// error passed by Done. +func (t *Throttle) Finish() error { + t.wg.Wait() + close(t.ch) + close(t.errCh) + for err := range t.errCh { + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/dgryski/go-farm/.gitignore b/vendor/github.com/dgryski/go-farm/.gitignore new file mode 100644 index 00000000..36029ab5 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +*.exe +*.test +*.prof + +target diff --git a/vendor/github.com/dgryski/go-farm/.travis.yml b/vendor/github.com/dgryski/go-farm/.travis.yml new file mode 100644 index 00000000..bc89a55d --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/.travis.yml @@ -0,0 +1,39 @@ +language: go + +sudo: false + +branches: + except: + - release + +branches: + only: + - master + - develop + - travis + +go: + - 1.11.x + - 1.12.x + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; + - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; + - go get github.com/mattn/goveralls + +before_script: + - make deps + +script: + - make qa + +after_failure: + - cat ./target/test/report.xml + +after_success: + - if [ "$TRAVIS_GO_VERSION" = "1.9" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/dgryski/go-farm/LICENSE b/vendor/github.com/dgryski/go-farm/LICENSE new file mode 100644 index 00000000..3d07f666 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/LICENSE @@ -0,0 +1,23 @@ +As this is a highly derivative work, I have placed it under the same license as the original implementation: + +Copyright (c) 2014-2017 Damian Gryski +Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, 
including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/dgryski/go-farm/Makefile b/vendor/github.com/dgryski/go-farm/Makefile new file mode 100644 index 00000000..c189c95d --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/Makefile @@ -0,0 +1,203 @@ +# MAKEFILE +# +# @author Nicola Asuni +# @link https://github.com/dgryski/go-farm +# +# This file is intended to be executed in a Linux-compatible system. +# It also assumes that the project has been cloned in the right path under GOPATH: +# $GOPATH/src/github.com/dgryski/go-farm +# +# ------------------------------------------------------------------------------ + +# List special make targets that are not associated with files +.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke + +# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS). +SHELL=/bin/bash + +# CVS path (path to the parent dir containing the project) +CVSPATH=github.com/dgryski + +# Project owner +OWNER=dgryski + +# Project vendor +VENDOR=dgryski + +# Project name +PROJECT=go-farm + +# Project version +VERSION=$(shell cat VERSION) + +# Name of RPM or DEB package +PKGNAME=${VENDOR}-${PROJECT} + +# Current directory +CURRENTDIR=$(shell pwd) + +# GO lang path +ifneq ($(GOPATH),) + ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),) + # the defined GOPATH is not valid + GOPATH= + endif +endif +ifeq ($(GOPATH),) + # extract the GOPATH + GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR))) +endif + +# --- MAKE TARGETS --- + +# Display general help about this command +help: + @echo "" + @echo "$(PROJECT) Makefile." 
+ @echo "GOPATH=$(GOPATH)" + @echo "The following commands are available:" + @echo "" + @echo " make qa : Run all the tests" + @echo " make test : Run the unit tests" + @echo "" + @echo " make format : Format the source code" + @echo " make fmtcheck : Check if the source code has been formatted" + @echo " make vet : Check for suspicious constructs" + @echo " make lint : Check for style errors" + @echo " make coverage : Generate the coverage report" + @echo " make cyclo : Generate the cyclomatic complexity report" + @echo " make ineffassign : Detect ineffectual assignments" + @echo " make misspell : Detect commonly misspelled words in source files" + @echo " make structcheck : Find unused struct fields" + @echo " make varcheck : Find unused global variables and constants" + @echo " make errcheck : Check that error return values are used" + @echo " make gosimple : Suggest code simplifications" + @echo " make astscan : GO AST scanner" + @echo "" + @echo " make docs : Generate source code documentation" + @echo "" + @echo " make deps : Get the dependencies" + @echo " make clean : Remove any build artifact" + @echo " make nuke : Deletes any intermediate file" + @echo "" + + +# Alias for help target +all: help + +# Run the unit tests +test: + @mkdir -p target/test + @mkdir -p target/report + GOPATH=$(GOPATH) \ + go test \ + -covermode=atomic \ + -bench=. \ + -race \ + -cpuprofile=target/report/cpu.out \ + -memprofile=target/report/mem.out \ + -mutexprofile=target/report/mutex.out \ + -coverprofile=target/report/coverage.out \ + -v ./... | \ + tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \ + test $${PIPESTATUS[0]} -eq 0 + +# Format the source code +format: + @find . -type f -name "*.go" -exec gofmt -s -w {} \; + +# Check if the source code has been formatted +fmtcheck: + @mkdir -p target + @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff + @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } + +# Check for syntax errors +vet: + GOPATH=$(GOPATH) go vet . + +# Check for style errors +lint: + GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint . 
+ +# Generate the coverage report +coverage: + @mkdir -p target/report + GOPATH=$(GOPATH) \ + go tool cover -html=target/report/coverage.out -o target/report/coverage.html + +# Report cyclomatic complexity +cyclo: + @mkdir -p target/report + GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0 + +# Detect ineffectual assignments +ineffassign: + @mkdir -p target/report + GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0 + +# Detect commonly misspelled words in source files +misspell: + @mkdir -p target/report + GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0 + +# Find unused struct fields +structcheck: + @mkdir -p target/report + GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt + +# Find unused global variables and constants +varcheck: + @mkdir -p target/report + GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt + +# Check that error return values are used +errcheck: + @mkdir -p target/report + GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt + +# Suggest code simplifications +gosimple: + @mkdir -p target/report + GOPATH=$(GOPATH) gosimple ./ | tee target/report/gosimple.txt + +# AST scanner +astscan: + @mkdir -p target/report + GOPATH=$(GOPATH) gas .//*.go | tee target/report/astscan.txt + +# Generate source docs +docs: + @mkdir -p target/docs + nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 & + wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060` + @echo ''${PKGNAME}' Documentation ...' > target/docs/index.html + +# Alias to run all quality-assurance checks +qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan + +# --- INSTALL --- + +# Get the dependencies +deps: + GOPATH=$(GOPATH) go get ./... + GOPATH=$(GOPATH) go get golang.org/x/lint/golint + GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report + GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov + GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo + GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign + GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell + GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck + GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck + GOPATH=$(GOPATH) go get github.com/kisielk/errcheck + GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/gosimple + GOPATH=$(GOPATH) go get github.com/GoASTScanner/gas + +# Remove any build artifact +clean: + GOPATH=$(GOPATH) go clean ./... + +# Deletes any intermediate file +nuke: + rm -rf ./target + GOPATH=$(GOPATH) go clean -i ./... 
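The files below (README, VERSION, the asm.go generator, and the farmhash*.go / fp_amd64.s sources) vendor github.com/dgryski/go-farm, whose exported API — Hash32, Hash64, Fingerprint64, Fingerprint128 and their seeded variants — is defined later in this diff. The following is a minimal usage sketch of that API, not part of the vendored tree; the sample input and output formatting are arbitrary:

```go
// Illustrative sketch only; assumes just the exported go-farm functions
// added in this diff (Hash32, Hash64, Fingerprint64, Fingerprint128).
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	data := []byte("hello, farmhash") // arbitrary sample input

	// 32- and 64-bit hashes of a byte slice (seeded variants Hash32WithSeed,
	// Hash64WithSeed and Hash64WithSeeds are also exported).
	fmt.Printf("Hash32:         %08x\n", farm.Hash32(data))
	fmt.Printf("Hash64:         %016x\n", farm.Hash64(data))

	// Fingerprint64 is implemented in fp_amd64.s on amd64; Fingerprint128
	// returns the low and high 64-bit halves separately.
	fmt.Printf("Fingerprint64:  %016x\n", farm.Fingerprint64(data))
	lo, hi := farm.Fingerprint128(data)
	fmt.Printf("Fingerprint128: lo=%016x hi=%016x\n", lo, hi)
}
```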
diff --git a/vendor/github.com/dgryski/go-farm/README.md b/vendor/github.com/dgryski/go-farm/README.md new file mode 100644 index 00000000..dd07d6f9 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/README.md @@ -0,0 +1,41 @@ +# go-farm + +*Google's FarmHash hash functions implemented in Go* + +[![Master Branch](https://img.shields.io/badge/-master:-gray.svg)](https://github.com/dgryski/go-farm/tree/master) +[![Master Build Status](https://secure.travis-ci.org/dgryski/go-farm.png?branch=master)](https://travis-ci.org/dgryski/go-farm?branch=master) +[![Master Coverage Status](https://coveralls.io/repos/dgryski/go-farm/badge.svg?branch=master&service=github)](https://coveralls.io/github/dgryski/go-farm?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/dgryski/go-farm)](https://goreportcard.com/report/github.com/dgryski/go-farm) +[![GoDoc](https://godoc.org/github.com/dgryski/go-farm?status.svg)](http://godoc.org/github.com/dgryski/go-farm) + +## Description + +FarmHash, a family of hash functions. + +This is a (mechanical) translation of the non-SSE4/non-AESNI hash functions from Google's FarmHash (https://github.com/google/farmhash). + + +FarmHash provides hash functions for strings and other data. +The functions mix the input bits thoroughly but are not suitable for cryptography. + +All members of the FarmHash family were designed with heavy reliance on previous work by Jyrki Alakuijala, Austin Appleby, Bob Jenkins, and others. + +For more information please consult https://github.com/google/farmhash + + +## Getting started + +This application is written in Go language, please refer to the guides in https://golang.org for getting started. + +This project include a Makefile that allows you to test and build the project with simple commands. +To see all available options: +```bash +make help +``` + +## Running all tests + +Before committing the code, please check if it passes all tests using +```bash +make qa +``` diff --git a/vendor/github.com/dgryski/go-farm/VERSION b/vendor/github.com/dgryski/go-farm/VERSION new file mode 100644 index 00000000..38f77a65 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/VERSION @@ -0,0 +1 @@ +2.0.1 diff --git a/vendor/github.com/dgryski/go-farm/asm.go b/vendor/github.com/dgryski/go-farm/asm.go new file mode 100644 index 00000000..1d69eebb --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/asm.go @@ -0,0 +1,898 @@ +// +build ignore + +package main + +import ( + "flag" + + . "github.com/mmcloughlin/avo/build" + . "github.com/mmcloughlin/avo/operand" + . 
"github.com/mmcloughlin/avo/reg" +) + +const k0 uint64 = 0xc3a5c85c97cb3127 +const k1 uint64 = 0xb492b66fbe98f273 +const k2 uint64 = 0x9ae16a3b2f90404f + +const c1 uint32 = 0xcc9e2d51 +const c2 uint32 = 0x1b873593 + +func shiftMix(val GPVirtual) GPVirtual { + r := GP64() + MOVQ(val, r) + SHRQ(Imm(47), r) + XORQ(val, r) + return r +} + +func shiftMix64(val uint64) uint64 { + return val ^ (val >> 47) +} + +func hashLen16MulLine(a, b, c, d, k, mul GPVirtual) GPVirtual { + tmpa := GP64() + MOVQ(a, tmpa) + + ADDQ(b, tmpa) + RORQ(Imm(43), tmpa) + ADDQ(d, tmpa) + tmpc := GP64() + MOVQ(c, tmpc) + RORQ(Imm(30), tmpc) + ADDQ(tmpc, tmpa) + + ADDQ(c, a) + ADDQ(k, b) + RORQ(Imm(18), b) + ADDQ(b, a) + + r := hashLen16Mul(tmpa, a, mul) + return r +} + +func hashLen16Mul(u, v, mul GPVirtual) GPVirtual { + XORQ(v, u) + IMULQ(mul, u) + a := shiftMix(u) + + XORQ(a, v) + IMULQ(mul, v) + b := shiftMix(v) + + IMULQ(mul, b) + + return b +} + +func hashLen0to16(sbase, slen GPVirtual) { + CMPQ(slen, Imm(8)) + JL(LabelRef("check4")) + { + a := GP64() + MOVQ(Mem{Base: sbase}, a) + + b := GP64() + t := GP64() + MOVQ(slen, t) + SUBQ(Imm(8), t) + ADDQ(sbase, t) + MOVQ(Mem{Base: t}, b) + + rk2 := GP64() + MOVQ(Imm(k2), rk2) + + ADDQ(rk2, a) + + mul := slen + SHLQ(Imm(1), mul) + ADDQ(rk2, mul) + + c := GP64() + MOVQ(b, c) + RORQ(Imm(37), c) + IMULQ(mul, c) + ADDQ(a, c) + + d := GP64() + MOVQ(a, d) + RORQ(Imm(25), d) + ADDQ(b, d) + IMULQ(mul, d) + + r := hashLen16Mul(c, d, mul) + Store(r, ReturnIndex(0)) + RET() + } + + Label("check4") + + CMPQ(slen, Imm(4)) + JL(LabelRef("check0")) + { + rk2 := GP64() + MOVQ(Imm(k2), rk2) + + mul := GP64() + MOVQ(slen, mul) + SHLQ(Imm(1), mul) + ADDQ(rk2, mul) + + a := GP64() + MOVL(Mem{Base: sbase}, a.As32()) + + SHLQ(Imm(3), a) + ADDQ(slen, a) + + b := GP64() + SUBQ(Imm(4), slen) + ADDQ(slen, sbase) + MOVL(Mem{Base: sbase}, b.As32()) + r := hashLen16Mul(a, b, mul) + + Store(r, ReturnIndex(0)) + RET() + } + + Label("check0") + TESTQ(slen, slen) + JZ(LabelRef("empty")) + { + + a := GP64() + MOVBQZX(Mem{Base: sbase}, a) + + base := GP64() + MOVQ(slen, base) + SHRQ(Imm(1), base) + + b := GP64() + ADDQ(sbase, base) + MOVBQZX(Mem{Base: base}, b) + + MOVQ(slen, base) + SUBQ(Imm(1), base) + c := GP64() + ADDQ(sbase, base) + MOVBQZX(Mem{Base: base}, c) + + SHLQ(Imm(8), b) + ADDQ(b, a) + y := a + + SHLQ(Imm(2), c) + ADDQ(c, slen) + z := slen + + rk0 := GP64() + MOVQ(Imm(k0), rk0) + IMULQ(rk0, z) + + rk2 := GP64() + MOVQ(Imm(k2), rk2) + + IMULQ(rk2, y) + XORQ(y, z) + + r := shiftMix(z) + IMULQ(rk2, r) + + Store(r, ReturnIndex(0)) + RET() + } + + Label("empty") + + ret := GP64() + MOVQ(Imm(k2), ret) + Store(ret, ReturnIndex(0)) + RET() +} + +func hashLen17to32(sbase, slen GPVirtual) { + mul := GP64() + MOVQ(slen, mul) + SHLQ(Imm(1), mul) + + rk2 := GP64() + MOVQ(Imm(k2), rk2) + ADDQ(rk2, mul) + + a := GP64() + MOVQ(Mem{Base: sbase}, a) + + rk1 := GP64() + MOVQ(Imm(k1), rk1) + IMULQ(rk1, a) + + b := GP64() + MOVQ(Mem{Base: sbase, Disp: 8}, b) + + base := GP64() + MOVQ(slen, base) + SUBQ(Imm(16), base) + ADDQ(sbase, base) + + c := GP64() + MOVQ(Mem{Base: base, Disp: 8}, c) + IMULQ(mul, c) + + d := GP64() + MOVQ(Mem{Base: base}, d) + IMULQ(rk2, d) + + r := hashLen16MulLine(a, b, c, d, rk2, mul) + Store(r, ReturnIndex(0)) + RET() +} + +// Return an 8-byte hash for 33 to 64 bytes. 
+func hashLen33to64(sbase, slen GPVirtual) { + mul := GP64() + MOVQ(slen, mul) + SHLQ(Imm(1), mul) + + rk2 := GP64() + MOVQ(Imm(k2), rk2) + ADDQ(rk2, mul) + + a := GP64() + MOVQ(Mem{Base: sbase}, a) + IMULQ(rk2, a) + + b := GP64() + MOVQ(Mem{Base: sbase, Disp: 8}, b) + + base := GP64() + MOVQ(slen, base) + SUBQ(Imm(16), base) + ADDQ(sbase, base) + + c := GP64() + MOVQ(Mem{Base: base, Disp: 8}, c) + IMULQ(mul, c) + + d := GP64() + MOVQ(Mem{Base: base}, d) + IMULQ(rk2, d) + + y := GP64() + MOVQ(a, y) + + ADDQ(b, y) + RORQ(Imm(43), y) + ADDQ(d, y) + tmpc := GP64() + MOVQ(c, tmpc) + RORQ(Imm(30), tmpc) + ADDQ(tmpc, y) + + ADDQ(a, c) + ADDQ(rk2, b) + RORQ(Imm(18), b) + ADDQ(b, c) + + tmpy := GP64() + MOVQ(y, tmpy) + z := hashLen16Mul(tmpy, c, mul) + + e := GP64() + MOVQ(Mem{Base: sbase, Disp: 16}, e) + IMULQ(mul, e) + + f := GP64() + MOVQ(Mem{Base: sbase, Disp: 24}, f) + + base = GP64() + MOVQ(slen, base) + SUBQ(Imm(32), base) + ADDQ(sbase, base) + g := GP64() + MOVQ(Mem{Base: base}, g) + ADDQ(y, g) + IMULQ(mul, g) + + h := GP64() + MOVQ(Mem{Base: base, Disp: 8}, h) + ADDQ(z, h) + IMULQ(mul, h) + + r := hashLen16MulLine(e, f, g, h, a, mul) + Store(r, ReturnIndex(0)) + RET() +} + +// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty. +func weakHashLen32WithSeeds(sbase GPVirtual, disp int, a, b GPVirtual) { + + w := Mem{Base: sbase, Disp: disp + 0} + x := Mem{Base: sbase, Disp: disp + 8} + y := Mem{Base: sbase, Disp: disp + 16} + z := Mem{Base: sbase, Disp: disp + 24} + + // a += w + ADDQ(w, a) + + // b = bits.RotateLeft64(b+a+z, -21) + ADDQ(a, b) + ADDQ(z, b) + RORQ(Imm(21), b) + + // c := a + c := GP64() + MOVQ(a, c) + + // a += x + // a += y + ADDQ(x, a) + ADDQ(y, a) + + // b += bits.RotateLeft64(a, -44) + atmp := GP64() + MOVQ(a, atmp) + RORQ(Imm(44), atmp) + ADDQ(atmp, b) + + // a += z + // b += c + ADDQ(z, a) + ADDQ(c, b) + + XCHGQ(a, b) +} + +func hashLoopBody(x, y, z, vlo, vhi, wlo, whi, sbase GPVirtual, mul1 GPVirtual, mul2 uint64) { + ADDQ(y, x) + ADDQ(vlo, x) + ADDQ(Mem{Base: sbase, Disp: 8}, x) + RORQ(Imm(37), x) + + IMULQ(mul1, x) + + ADDQ(vhi, y) + ADDQ(Mem{Base: sbase, Disp: 48}, y) + RORQ(Imm(42), y) + IMULQ(mul1, y) + + if mul2 != 1 { + t := GP64() + MOVQ(U32(mul2), t) + IMULQ(whi, t) + XORQ(t, x) + } else { + XORQ(whi, x) + } + + if mul2 != 1 { + t := GP64() + MOVQ(U32(mul2), t) + IMULQ(vlo, t) + ADDQ(t, y) + } else { + ADDQ(vlo, y) + } + + ADDQ(Mem{Base: sbase, Disp: 40}, y) + + ADDQ(wlo, z) + RORQ(Imm(33), z) + IMULQ(mul1, z) + + { + IMULQ(mul1, vhi) + MOVQ(x, vlo) + ADDQ(wlo, vlo) + weakHashLen32WithSeeds(sbase, 0, vhi, vlo) + } + + { + ADDQ(z, whi) + MOVQ(y, wlo) + ADDQ(Mem{Base: sbase, Disp: 16}, wlo) + weakHashLen32WithSeeds(sbase, 32, whi, wlo) + } + + XCHGQ(z, x) +} + +func fp64() { + + TEXT("Fingerprint64", NOSPLIT, "func(s []byte) uint64") + + slen := GP64() + sbase := GP64() + + Load(Param("s").Base(), sbase) + Load(Param("s").Len(), slen) + + CMPQ(slen, Imm(16)) + JG(LabelRef("check32")) + hashLen0to16(sbase, slen) + + Label("check32") + CMPQ(slen, Imm(32)) + JG(LabelRef("check64")) + hashLen17to32(sbase, slen) + + Label("check64") + CMPQ(slen, Imm(64)) + JG(LabelRef("long")) + hashLen33to64(sbase, slen) + + Label("long") + + seed := uint64(81) + + vlo, vhi, wlo, whi := GP64(), GP64(), GP64(), GP64() + XORQ(vlo, vlo) + XORQ(vhi, vhi) + XORQ(wlo, wlo) + XORQ(whi, whi) + + x := GP64() + + eightOne := uint64(81) + + MOVQ(Imm(eightOne*k2), x) + ADDQ(Mem{Base: sbase}, x) + + y := GP64() + y64 := uint64(seed*k1) + 113 + MOVQ(Imm(y64), y) + + z := GP64() + 
MOVQ(Imm(shiftMix64(y64*k2+113)*k2), z) + + endIdx := GP64() + MOVQ(slen, endIdx) + tmp := GP64() + SUBQ(Imm(1), endIdx) + MOVQ(U64(^uint64(63)), tmp) + ANDQ(tmp, endIdx) + last64Idx := GP64() + MOVQ(slen, last64Idx) + SUBQ(Imm(1), last64Idx) + ANDQ(Imm(63), last64Idx) + SUBQ(Imm(63), last64Idx) + ADDQ(endIdx, last64Idx) + + last64 := GP64() + MOVQ(last64Idx, last64) + ADDQ(sbase, last64) + + end := GP64() + MOVQ(slen, end) + + Label("loop") + + rk1 := GP64() + MOVQ(Imm(k1), rk1) + + hashLoopBody(x, y, z, vlo, vhi, wlo, whi, sbase, rk1, 1) + + ADDQ(Imm(64), sbase) + SUBQ(Imm(64), end) + CMPQ(end, Imm(64)) + JG(LabelRef("loop")) + + MOVQ(last64, sbase) + + mul := GP64() + MOVQ(z, mul) + ANDQ(Imm(0xff), mul) + SHLQ(Imm(1), mul) + ADDQ(rk1, mul) + + MOVQ(last64, sbase) + + SUBQ(Imm(1), slen) + ANDQ(Imm(63), slen) + ADDQ(slen, wlo) + + ADDQ(wlo, vlo) + ADDQ(vlo, wlo) + + hashLoopBody(x, y, z, vlo, vhi, wlo, whi, sbase, mul, 9) + + { + a := hashLen16Mul(vlo, wlo, mul) + ADDQ(z, a) + b := shiftMix(y) + rk0 := GP64() + MOVQ(Imm(k0), rk0) + IMULQ(rk0, b) + ADDQ(b, a) + + c := hashLen16Mul(vhi, whi, mul) + ADDQ(x, c) + + r := hashLen16Mul(a, c, mul) + Store(r, ReturnIndex(0)) + } + + RET() +} + +func fmix(h GPVirtual) GPVirtual { + h2 := GP32() + MOVL(h, h2) + SHRL(Imm(16), h2) + XORL(h2, h) + + MOVL(Imm(0x85ebca6b), h2) + IMULL(h2, h) + + MOVL(h, h2) + SHRL(Imm(13), h2) + XORL(h2, h) + + MOVL(Imm(0xc2b2ae35), h2) + IMULL(h2, h) + + MOVL(h, h2) + SHRL(Imm(16), h2) + XORL(h2, h) + return h +} + +func mur(a, h GPVirtual) GPVirtual { + imul3l(c1, a, a) + RORL(Imm(17), a) + imul3l(c2, a, a) + XORL(a, h) + RORL(Imm(19), h) + + LEAL(Mem{Base: h, Index: h, Scale: 4}, a) + LEAL(Mem{Base: a, Disp: 0xe6546b64}, h) + + return h +} + +func hash32Len5to12(sbase, slen GPVirtual) { + + a := GP32() + MOVL(slen.As32(), a) + b := GP32() + MOVL(a, b) + SHLL(Imm(2), b) + ADDL(a, b) + + c := GP32() + MOVL(U32(9), c) + + d := GP32() + MOVL(b, d) + + ADDL(Mem{Base: sbase, Disp: 0}, a) + + t := GP64() + MOVQ(slen, t) + SUBQ(Imm(4), t) + ADDQ(sbase, t) + ADDL(Mem{Base: t}, b) + + MOVQ(slen, t) + SHRQ(Imm(1), t) + ANDQ(Imm(4), t) + ADDQ(sbase, t) + ADDL(Mem{Base: t}, c) + + t = mur(a, d) + t = mur(b, t) + t = mur(c, t) + t = fmix(t) + + Store(t, ReturnIndex(0)) + RET() +} + +func hash32Len13to24Seed(sbase, slen GPVirtual) { + slen2 := GP64() + MOVQ(slen, slen2) + SHRQ(Imm(1), slen2) + ADDQ(sbase, slen2) + + a := GP32() + MOVL(Mem{Base: slen2, Disp: -4}, a) + + b := GP32() + MOVL(Mem{Base: sbase, Disp: 4}, b) + + send := GP64() + MOVQ(slen, send) + ADDQ(sbase, send) + + c := GP32() + MOVL(Mem{Base: send, Disp: -8}, c) + + d := GP32() + MOVL(Mem{Base: slen2}, d) + + e := GP32() + MOVL(Mem{Base: sbase}, e) + + f := GP32() + MOVL(Mem{Base: send, Disp: -4}, f) + + h := GP32() + MOVL(U32(c1), h) + IMULL(d, h) + ADDL(slen.As32(), h) + + RORL(Imm(12), a) + ADDL(f, a) + + ctmp := GP32() + MOVL(c, ctmp) + h = mur(ctmp, h) + ADDL(a, h) + + RORL(Imm(3), a) + ADDL(c, a) + + h = mur(e, h) + ADDL(a, h) + + ADDL(f, a) + RORL(Imm(12), a) + ADDL(d, a) + + h = mur(b, h) + ADDL(a, h) + + h = fmix(h) + + Store(h, ReturnIndex(0)) + RET() +} + +func hash32Len0to4(sbase, slen GPVirtual) { + b := GP32() + c := GP32() + + XORL(b, b) + MOVL(U32(9), c) + + TESTQ(slen, slen) + JZ(LabelRef("done")) + + l := GP64() + v := GP32() + MOVQ(slen, l) + + c1reg := GP32() + MOVL(U32(c1), c1reg) + + for i := 0; i < 4; i++ { + IMULL(c1reg, b) + MOVBLSX(Mem{Base: sbase, Disp: i}, v) + ADDL(v, b) + XORL(b, c) + SUBQ(Imm(1), l) + TESTQ(l, l) + JZ(LabelRef("done")) + 
} + + Label("done") + + s32 := GP32() + MOVL(slen.As32(), s32) + r := mur(s32, c) + r = mur(b, r) + r = fmix(r) + + Store(r, ReturnIndex(0)) + RET() +} + +func fp32() { + + TEXT("Fingerprint32", NOSPLIT, "func(s []byte) uint32") + + sbase := GP64() + slen := GP64() + + Load(Param("s").Base(), sbase) + Load(Param("s").Len(), slen) + + CMPQ(slen, Imm(24)) + JG(LabelRef("long")) + + CMPQ(slen, Imm(12)) + JG(LabelRef("hash_13_24")) + + CMPQ(slen, Imm(4)) + JG(LabelRef("hash_5_12")) + hash32Len0to4(sbase, slen) + + Label("hash_5_12") + hash32Len5to12(sbase, slen) + + Label("hash_13_24") + hash32Len13to24Seed(sbase, slen) + + Label("long") + + h := GP32() + MOVL(slen.As32(), h) + + g := GP32() + MOVL(U32(c1), g) + IMULL(h, g) + + f := GP32() + MOVL(g, f) + + // len > 24 + + send := GP64() + MOVQ(slen, send) + ADDQ(sbase, send) + c1reg := GP32() + MOVL(U32(c1), c1reg) + c2reg := GP32() + MOVL(U32(c2), c2reg) + + shuf := func(r GPVirtual, disp int) { + a := GP32() + MOVL(Mem{Base: send, Disp: disp}, a) + IMULL(c1reg, a) + RORL(Imm(17), a) + IMULL(c2reg, a) + XORL(a, r) + RORL(Imm(19), r) + MOVL(r, a) + SHLL(Imm(2), a) + ADDL(a, r) + ADDL(Imm(0xe6546b64), r) + } + + shuf(h, -4) + shuf(g, -8) + shuf(h, -16) + shuf(g, -12) + + PREFETCHT0(Mem{Base: sbase}) + { + a := GP32() + MOVL(Mem{Base: send, Disp: -20}, a) + IMULL(c1reg, a) + RORL(Imm(17), a) + IMULL(c2reg, a) + + ADDL(a, f) + RORL(Imm(19), f) + ADDL(Imm(113), f) + + } + + loop32Body := func(f, g, h, sbase, slen GPVirtual, disp int) { + a, b, c, d, e := GP32(), GP32(), GP32(), GP32(), GP32() + + MOVL(Mem{Base: sbase, Disp: disp + 0}, a) + ADDL(a, h) + + MOVL(Mem{Base: sbase, Disp: disp + 4}, b) + ADDL(b, g) + + MOVL(Mem{Base: sbase, Disp: disp + 8}, c) + ADDL(c, f) + + MOVL(Mem{Base: sbase, Disp: disp + 12}, d) + t := GP32() + MOVL(d, t) + h = mur(t, h) + + MOVL(Mem{Base: sbase, Disp: disp + 16}, e) + ADDL(e, h) + + MOVL(c, t) + g = mur(t, g) + ADDL(a, g) + + imul3l(c1, e, t) + ADDL(b, t) + f = mur(t, f) + ADDL(d, f) + + ADDL(g, f) + ADDL(f, g) + } + + Label("loop80") + CMPQ(slen, Imm(80+20)) + JL(LabelRef("loop20")) + { + PREFETCHT0(Mem{Base: sbase, Disp: 20}) + loop32Body(f, g, h, sbase, slen, 0) + PREFETCHT0(Mem{Base: sbase, Disp: 40}) + loop32Body(f, g, h, sbase, slen, 20) + PREFETCHT0(Mem{Base: sbase, Disp: 60}) + loop32Body(f, g, h, sbase, slen, 40) + PREFETCHT0(Mem{Base: sbase, Disp: 80}) + loop32Body(f, g, h, sbase, slen, 60) + + ADDQ(Imm(80), sbase) + SUBQ(Imm(80), slen) + JMP(LabelRef("loop80")) + } + + Label("loop20") + CMPQ(slen, Imm(20)) + JLE(LabelRef("after")) + { + loop32Body(f, g, h, sbase, slen, 0) + + ADDQ(Imm(20), sbase) + SUBQ(Imm(20), slen) + JMP(LabelRef("loop20")) + } + + Label("after") + + c1reg = GP32() + MOVL(U32(c1), c1reg) + + RORL(Imm(11), g) + IMULL(c1reg, g) + + RORL(Imm(17), g) + IMULL(c1reg, g) + + RORL(Imm(11), f) + IMULL(c1reg, f) + + RORL(Imm(17), f) + IMULL(c1reg, f) + + ADDL(g, h) + RORL(Imm(19), h) + + t := GP32() + MOVL(h, t) + SHLL(Imm(2), t) + ADDL(t, h) + ADDL(Imm(0xe6546b64), h) + + RORL(Imm(17), h) + IMULL(c1reg, h) + + ADDL(f, h) + RORL(Imm(19), h) + + t = GP32() + MOVL(h, t) + SHLL(Imm(2), t) + ADDL(t, h) + ADDL(Imm(0xe6546b64), h) + + RORL(Imm(17), h) + IMULL(c1reg, h) + + Store(h, ReturnIndex(0)) + RET() +} + +var go111 = flag.Bool("go111", true, "use assembly instructions present in go1.11 and later") + +func imul3l(m uint32, x, y Register) { + if *go111 { + IMUL3L(U32(m), x, y) + } else { + t := GP32() + MOVL(U32(m), t) + IMULL(t, x) + MOVL(x, y) + } +} + +func main() { + + flag.Parse() + + 
ConstraintExpr("amd64,!purego") + + fp64() + fp32() + + Generate() +} diff --git a/vendor/github.com/dgryski/go-farm/basics.go b/vendor/github.com/dgryski/go-farm/basics.go new file mode 100644 index 00000000..ec7076c0 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/basics.go @@ -0,0 +1,32 @@ +package farm + +import "math/bits" + +// Some primes between 2^63 and 2^64 for various uses. +const k0 uint64 = 0xc3a5c85c97cb3127 +const k1 uint64 = 0xb492b66fbe98f273 +const k2 uint64 = 0x9ae16a3b2f90404f + +// Magic numbers for 32-bit hashing. Copied from Murmur3. +const c1 uint32 = 0xcc9e2d51 +const c2 uint32 = 0x1b873593 + +// A 32-bit to 32-bit integer hash copied from Murmur3. +func fmix(h uint32) uint32 { + h ^= h >> 16 + h *= 0x85ebca6b + h ^= h >> 13 + h *= 0xc2b2ae35 + h ^= h >> 16 + return h +} + +func mur(a, h uint32) uint32 { + // Helper from Murmur3 for combining two 32-bit values. + a *= c1 + a = bits.RotateLeft32(a, -17) + a *= c2 + h ^= a + h = bits.RotateLeft32(h, -19) + return h*5 + 0xe6546b64 +} diff --git a/vendor/github.com/dgryski/go-farm/farmhashcc.go b/vendor/github.com/dgryski/go-farm/farmhashcc.go new file mode 100644 index 00000000..3e68ae3a --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/farmhashcc.go @@ -0,0 +1,194 @@ +package farm + +import ( + "encoding/binary" + "math/bits" +) + +// This file provides a 32-bit hash equivalent to CityHash32 (v1.1.1) +// and a 128-bit hash equivalent to CityHash128 (v1.1.1). It also provides +// a seeded 32-bit hash function similar to CityHash32. + +func hash32Len13to24Seed(s []byte, seed uint32) uint32 { + slen := len(s) + a := binary.LittleEndian.Uint32(s[-4+(slen>>1) : -4+(slen>>1)+4]) + b := binary.LittleEndian.Uint32(s[4 : 4+4]) + c := binary.LittleEndian.Uint32(s[slen-8 : slen-8+4]) + d := binary.LittleEndian.Uint32(s[(slen >> 1) : (slen>>1)+4]) + e := binary.LittleEndian.Uint32(s[0 : 0+4]) + f := binary.LittleEndian.Uint32(s[slen-4 : slen-4+4]) + h := d*c1 + uint32(slen) + seed + a = bits.RotateLeft32(a, -12) + f + h = mur(c, h) + a + a = bits.RotateLeft32(a, -3) + c + h = mur(e, h) + a + a = bits.RotateLeft32(a+f, -12) + d + h = mur(b^seed, h) + a + return fmix(h) +} + +func hash32Len0to4(s []byte, seed uint32) uint32 { + slen := len(s) + b := seed + c := uint32(9) + for i := 0; i < slen; i++ { + v := int8(s[i]) + b = (b * c1) + uint32(v) + c ^= b + } + return fmix(mur(b, mur(uint32(slen), c))) +} + +func hash128to64(x uint128) uint64 { + // Murmur-inspired hashing. + const mul uint64 = 0x9ddfea08eb382d69 + a := (x.lo ^ x.hi) * mul + a ^= (a >> 47) + b := (x.hi ^ a) * mul + b ^= (b >> 47) + b *= mul + return b +} + +type uint128 struct { + lo uint64 + hi uint64 +} + +// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings +// of any length representable in signed long. Based on City and Murmur. 
+func cityMurmur(s []byte, seed uint128) uint128 { + slen := len(s) + a := seed.lo + b := seed.hi + var c uint64 + var d uint64 + l := slen - 16 + if l <= 0 { // len <= 16 + a = shiftMix(a*k1) * k1 + c = b*k1 + hashLen0to16(s) + if slen >= 8 { + d = shiftMix(a + binary.LittleEndian.Uint64(s[0:0+8])) + } else { + d = shiftMix(a + c) + } + } else { // len > 16 + c = hashLen16(binary.LittleEndian.Uint64(s[slen-8:slen-8+8])+k1, a) + d = hashLen16(b+uint64(slen), c+binary.LittleEndian.Uint64(s[slen-16:slen-16+8])) + a += d + for { + a ^= shiftMix(binary.LittleEndian.Uint64(s[0:0+8])*k1) * k1 + a *= k1 + b ^= a + c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8])*k1) * k1 + c *= k1 + d ^= c + s = s[16:] + l -= 16 + if l <= 0 { + break + } + } + } + a = hashLen16(a, c) + b = hashLen16(d, b) + return uint128{a ^ b, hashLen16(b, a)} +} + +func cityHash128WithSeed(s []byte, seed uint128) uint128 { + slen := len(s) + if slen < 128 { + return cityMurmur(s, seed) + } + + endIdx := ((slen - 1) / 128) * 128 + lastBlockIdx := endIdx + ((slen - 1) & 127) - 127 + last := s[lastBlockIdx:] + + // We expect len >= 128 to be the common case. Keep 56 bytes of state: + // v, w, x, y, and z. + var v1, v2 uint64 + var w1, w2 uint64 + x := seed.lo + y := seed.hi + z := uint64(slen) * k1 + v1 = bits.RotateLeft64(y^k1, -49)*k1 + binary.LittleEndian.Uint64(s[0:0+8]) + v2 = bits.RotateLeft64(v1, -42)*k1 + binary.LittleEndian.Uint64(s[8:8+8]) + w1 = bits.RotateLeft64(y+z, -35)*k1 + x + w2 = bits.RotateLeft64(x+binary.LittleEndian.Uint64(s[88:88+8]), -53) * k1 + + // This is the same inner loop as CityHash64(), manually unrolled. + for { + x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1 + y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1 + x ^= w2 + y += v1 + binary.LittleEndian.Uint64(s[40:40+8]) + z = bits.RotateLeft64(z+w1, -33) * k1 + v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1) + w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8])) + z, x = x, z + s = s[64:] + x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1 + y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1 + x ^= w2 + y += v1 + binary.LittleEndian.Uint64(s[40:40+8]) + z = bits.RotateLeft64(z+w1, -33) * k1 + v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1) + w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8])) + z, x = x, z + s = s[64:] + slen -= 128 + if slen < 128 { + break + } + } + x += bits.RotateLeft64(v1+z, -49) * k0 + y = y*k0 + bits.RotateLeft64(w2, -37) + z = z*k0 + bits.RotateLeft64(w1, -27) + w1 *= 9 + v1 *= k0 + // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s. + for tailDone := 0; tailDone < slen; { + tailDone += 32 + y = bits.RotateLeft64(x+y, -42)*k0 + v2 + w1 += binary.LittleEndian.Uint64(last[128-tailDone+16 : 128-tailDone+16+8]) + x = x*k0 + w1 + z += w2 + binary.LittleEndian.Uint64(last[128-tailDone:128-tailDone+8]) + w2 += v1 + v1, v2 = weakHashLen32WithSeeds(last[128-tailDone:], v1+z, v2) + v1 *= k0 + } + + // At this point our 56 bytes of state should contain more than + // enough information for a strong 128-bit hash. We use two + // different 56-byte-to-8-byte hashes to get a 16-byte final result. 
+ x = hashLen16(x, v1) + y = hashLen16(y+z, w1) + return uint128{hashLen16(x+v2, w2) + y, + hashLen16(x+w2, y+v2)} +} + +func cityHash128(s []byte) uint128 { + slen := len(s) + if slen >= 16 { + return cityHash128WithSeed(s[16:], uint128{binary.LittleEndian.Uint64(s[0 : 0+8]), binary.LittleEndian.Uint64(s[8:8+8]) + k0}) + } + return cityHash128WithSeed(s, uint128{k0, k1}) +} + +// Fingerprint128 is a 128-bit fingerprint function for byte-slices +func Fingerprint128(s []byte) (lo, hi uint64) { + h := cityHash128(s) + return h.lo, h.hi +} + +// Hash128 is a 128-bit hash function for byte-slices +func Hash128(s []byte) (lo, hi uint64) { + return Fingerprint128(s) +} + +// Hash128WithSeed is a 128-bit hash function for byte-slices and a 128-bit seed +func Hash128WithSeed(s []byte, seed0, seed1 uint64) (lo, hi uint64) { + h := cityHash128WithSeed(s, uint128{seed0, seed1}) + return h.lo, h.hi +} diff --git a/vendor/github.com/dgryski/go-farm/farmhashmk.go b/vendor/github.com/dgryski/go-farm/farmhashmk.go new file mode 100644 index 00000000..8e4c7428 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/farmhashmk.go @@ -0,0 +1,102 @@ +package farm + +import ( + "encoding/binary" + "math/bits" +) + +func hash32Len5to12(s []byte, seed uint32) uint32 { + slen := len(s) + a := uint32(len(s)) + b := uint32(len(s) * 5) + c := uint32(9) + d := b + seed + a += binary.LittleEndian.Uint32(s[0 : 0+4]) + b += binary.LittleEndian.Uint32(s[slen-4 : slen-4+4]) + c += binary.LittleEndian.Uint32(s[((slen >> 1) & 4) : ((slen>>1)&4)+4]) + return fmix(seed ^ mur(c, mur(b, mur(a, d)))) +} + +// Hash32 hashes a byte slice and returns a uint32 hash value +func Hash32(s []byte) uint32 { + + slen := len(s) + + if slen <= 24 { + if slen <= 12 { + if slen <= 4 { + return hash32Len0to4(s, 0) + } + return hash32Len5to12(s, 0) + } + return hash32Len13to24Seed(s, 0) + } + + // len > 24 + h := uint32(slen) + g := c1 * uint32(slen) + f := g + a0 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-4:slen-4+4])*c1, -17) * c2 + a1 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-8:slen-8+4])*c1, -17) * c2 + a2 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-16:slen-16+4])*c1, -17) * c2 + a3 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-12:slen-12+4])*c1, -17) * c2 + a4 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-20:slen-20+4])*c1, -17) * c2 + h ^= a0 + h = bits.RotateLeft32(h, -19) + h = h*5 + 0xe6546b64 + h ^= a2 + h = bits.RotateLeft32(h, -19) + h = h*5 + 0xe6546b64 + g ^= a1 + g = bits.RotateLeft32(g, -19) + g = g*5 + 0xe6546b64 + g ^= a3 + g = bits.RotateLeft32(g, -19) + g = g*5 + 0xe6546b64 + f += a4 + f = bits.RotateLeft32(f, -19) + 113 + for len(s) > 20 { + a := binary.LittleEndian.Uint32(s[0 : 0+4]) + b := binary.LittleEndian.Uint32(s[4 : 4+4]) + c := binary.LittleEndian.Uint32(s[8 : 8+4]) + d := binary.LittleEndian.Uint32(s[12 : 12+4]) + e := binary.LittleEndian.Uint32(s[16 : 16+4]) + h += a + g += b + f += c + h = mur(d, h) + e + g = mur(c, g) + a + f = mur(b+e*c1, f) + d + f += g + g += f + s = s[20:] + } + g = bits.RotateLeft32(g, -11) * c1 + g = bits.RotateLeft32(g, -17) * c1 + f = bits.RotateLeft32(f, -11) * c1 + f = bits.RotateLeft32(f, -17) * c1 + h = bits.RotateLeft32(h+g, -19) + h = h*5 + 0xe6546b64 + h = bits.RotateLeft32(h, -17) * c1 + h = bits.RotateLeft32(h+f, -19) + h = h*5 + 0xe6546b64 + h = bits.RotateLeft32(h, -17) * c1 + return h +} + +// Hash32WithSeed hashes a byte slice and a uint32 seed and returns a uint32 hash value +func Hash32WithSeed(s []byte, seed uint32) 
uint32 { + slen := len(s) + + if slen <= 24 { + if slen >= 13 { + return hash32Len13to24Seed(s, seed*c1) + } + if slen >= 5 { + return hash32Len5to12(s, seed) + } + return hash32Len0to4(s, seed) + } + h := hash32Len13to24Seed(s[:24], seed^uint32(slen)) + return mur(Hash32(s[24:])+seed, h) +} diff --git a/vendor/github.com/dgryski/go-farm/farmhashna.go b/vendor/github.com/dgryski/go-farm/farmhashna.go new file mode 100644 index 00000000..ac62edd3 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/farmhashna.go @@ -0,0 +1,161 @@ +package farm + +import ( + "encoding/binary" + "math/bits" +) + +func shiftMix(val uint64) uint64 { + return val ^ (val >> 47) +} + +func hashLen16(u, v uint64) uint64 { + return hash128to64(uint128{u, v}) +} + +func hashLen16Mul(u, v, mul uint64) uint64 { + // Murmur-inspired hashing. + a := (u ^ v) * mul + a ^= (a >> 47) + b := (v ^ a) * mul + b ^= (b >> 47) + b *= mul + return b +} + +func hashLen0to16(s []byte) uint64 { + slen := uint64(len(s)) + if slen >= 8 { + mul := k2 + slen*2 + a := binary.LittleEndian.Uint64(s[0:0+8]) + k2 + b := binary.LittleEndian.Uint64(s[int(slen-8) : int(slen-8)+8]) + c := bits.RotateLeft64(b, -37)*mul + a + d := (bits.RotateLeft64(a, -25) + b) * mul + return hashLen16Mul(c, d, mul) + } + + if slen >= 4 { + mul := k2 + slen*2 + a := binary.LittleEndian.Uint32(s[0 : 0+4]) + return hashLen16Mul(slen+(uint64(a)<<3), uint64(binary.LittleEndian.Uint32(s[int(slen-4):int(slen-4)+4])), mul) + } + if slen > 0 { + a := s[0] + b := s[slen>>1] + c := s[slen-1] + y := uint32(a) + (uint32(b) << 8) + z := uint32(slen) + (uint32(c) << 2) + return shiftMix(uint64(y)*k2^uint64(z)*k0) * k2 + } + return k2 +} + +// This probably works well for 16-byte strings as well, but it may be overkill +// in that case. +func hashLen17to32(s []byte) uint64 { + slen := len(s) + mul := k2 + uint64(slen*2) + a := binary.LittleEndian.Uint64(s[0:0+8]) * k1 + b := binary.LittleEndian.Uint64(s[8 : 8+8]) + c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul + d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 + return hashLen16Mul(bits.RotateLeft64(a+b, -43)+bits.RotateLeft64(c, -30)+d, a+bits.RotateLeft64(b+k2, -18)+c, mul) +} + +// Return a 16-byte hash for 48 bytes. Quick and dirty. +// Callers do best to use "random-looking" values for a and b. +func weakHashLen32WithSeedsWords(w, x, y, z, a, b uint64) (uint64, uint64) { + a += w + b = bits.RotateLeft64(b+a+z, -21) + c := a + a += x + a += y + b += bits.RotateLeft64(a, -44) + return a + z, b + c +} + +// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty. +func weakHashLen32WithSeeds(s []byte, a, b uint64) (uint64, uint64) { + return weakHashLen32WithSeedsWords(binary.LittleEndian.Uint64(s[0:0+8]), + binary.LittleEndian.Uint64(s[8:8+8]), + binary.LittleEndian.Uint64(s[16:16+8]), + binary.LittleEndian.Uint64(s[24:24+8]), + a, + b) +} + +// Return an 8-byte hash for 33 to 64 bytes. 
+func hashLen33to64(s []byte) uint64 { + slen := len(s) + mul := k2 + uint64(slen)*2 + a := binary.LittleEndian.Uint64(s[0:0+8]) * k2 + b := binary.LittleEndian.Uint64(s[8 : 8+8]) + c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul + d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 + y := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d + z := hashLen16Mul(y, a+bits.RotateLeft64(b+k2, -18)+c, mul) + e := binary.LittleEndian.Uint64(s[16:16+8]) * mul + f := binary.LittleEndian.Uint64(s[24 : 24+8]) + g := (y + binary.LittleEndian.Uint64(s[slen-32:slen-32+8])) * mul + h := (z + binary.LittleEndian.Uint64(s[slen-24:slen-24+8])) * mul + return hashLen16Mul(bits.RotateLeft64(e+f, -43)+bits.RotateLeft64(g, -30)+h, e+bits.RotateLeft64(f+a, -18)+g, mul) +} + +func naHash64(s []byte) uint64 { + slen := len(s) + var seed uint64 = 81 + if slen <= 32 { + if slen <= 16 { + return hashLen0to16(s) + } + return hashLen17to32(s) + } + if slen <= 64 { + return hashLen33to64(s) + } + // For strings over 64 bytes we loop. + // Internal state consists of 56 bytes: v, w, x, y, and z. + v := uint128{0, 0} + w := uint128{0, 0} + x := seed*k2 + binary.LittleEndian.Uint64(s[0:0+8]) + y := seed*k1 + 113 + z := shiftMix(y*k2+113) * k2 + // Set end so that after the loop we have 1 to 64 bytes left to process. + endIdx := ((slen - 1) / 64) * 64 + last64Idx := endIdx + ((slen - 1) & 63) - 63 + last64 := s[last64Idx:] + for len(s) > 64 { + x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1 + y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1 + x ^= w.hi + y += v.lo + binary.LittleEndian.Uint64(s[40:40+8]) + z = bits.RotateLeft64(z+w.lo, -33) * k1 + v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*k1, x+w.lo) + w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8])) + x, z = z, x + s = s[64:] + } + mul := k1 + ((z & 0xff) << 1) + // Make s point to the last 64 bytes of input. 
+ s = last64 + w.lo += (uint64(slen-1) & 63) + v.lo += w.lo + w.lo += v.lo + x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul + y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul + x ^= w.hi * 9 + y += v.lo*9 + binary.LittleEndian.Uint64(s[40:40+8]) + z = bits.RotateLeft64(z+w.lo, -33) * mul + v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo) + w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8])) + x, z = z, x + return hashLen16Mul(hashLen16Mul(v.lo, w.lo, mul)+shiftMix(y)*k0+z, hashLen16Mul(v.hi, w.hi, mul)+x, mul) +} + +func naHash64WithSeed(s []byte, seed uint64) uint64 { + return naHash64WithSeeds(s, k2, seed) +} + +func naHash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 { + return hashLen16(naHash64(s)-seed0, seed1) +} diff --git a/vendor/github.com/dgryski/go-farm/farmhashuo.go b/vendor/github.com/dgryski/go-farm/farmhashuo.go new file mode 100644 index 00000000..474b74e0 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/farmhashuo.go @@ -0,0 +1,122 @@ +package farm + +import ( + "encoding/binary" + "math/bits" +) + +func uoH(x, y, mul uint64, r uint) uint64 { + a := (x ^ y) * mul + a ^= (a >> 47) + b := (y ^ a) * mul + return bits.RotateLeft64(b, -int(r)) * mul +} + +// Hash64WithSeeds hashes a byte slice and two uint64 seeds and returns a uint64 hash value +func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 { + slen := len(s) + if slen <= 64 { + return naHash64WithSeeds(s, seed0, seed1) + } + + // For strings over 64 bytes we loop. + // Internal state consists of 64 bytes: u, v, w, x, y, and z. + x := seed0 + y := seed1*k2 + 113 + z := shiftMix(y*k2) * k2 + v := uint128{seed0, seed1} + var w uint128 + u := x - z + x *= k2 + mul := k2 + (u & 0x82) + + // Set end so that after the loop we have 1 to 64 bytes left to process. + endIdx := ((slen - 1) / 64) * 64 + last64Idx := endIdx + ((slen - 1) & 63) - 63 + last64 := s[last64Idx:] + + for len(s) > 64 { + a0 := binary.LittleEndian.Uint64(s[0 : 0+8]) + a1 := binary.LittleEndian.Uint64(s[8 : 8+8]) + a2 := binary.LittleEndian.Uint64(s[16 : 16+8]) + a3 := binary.LittleEndian.Uint64(s[24 : 24+8]) + a4 := binary.LittleEndian.Uint64(s[32 : 32+8]) + a5 := binary.LittleEndian.Uint64(s[40 : 40+8]) + a6 := binary.LittleEndian.Uint64(s[48 : 48+8]) + a7 := binary.LittleEndian.Uint64(s[56 : 56+8]) + x += a0 + a1 + y += a2 + z += a3 + v.lo += a4 + v.hi += a5 + a1 + w.lo += a6 + w.hi += a7 + + x = bits.RotateLeft64(x, -26) + x *= 9 + y = bits.RotateLeft64(y, -29) + z *= mul + v.lo = bits.RotateLeft64(v.lo, -33) + v.hi = bits.RotateLeft64(v.hi, -30) + w.lo ^= x + w.lo *= 9 + z = bits.RotateLeft64(z, -32) + z += w.hi + w.hi += z + z *= 9 + u, y = y, u + + z += a0 + a6 + v.lo += a2 + v.hi += a3 + w.lo += a4 + w.hi += a5 + a6 + x += a1 + y += a7 + + y += v.lo + v.lo += x - y + v.hi += w.lo + w.lo += v.hi + w.hi += x - y + x += w.hi + w.hi = bits.RotateLeft64(w.hi, -34) + u, z = z, u + s = s[64:] + } + // Make s point to the last 64 bytes of input. 
+ s = last64 + u *= 9 + v.hi = bits.RotateLeft64(v.hi, -28) + v.lo = bits.RotateLeft64(v.lo, -20) + w.lo += (uint64(slen-1) & 63) + u += y + y += u + x = bits.RotateLeft64(y-x+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul + y = bits.RotateLeft64(y^v.hi^binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul + x ^= w.hi * 9 + y += v.lo + binary.LittleEndian.Uint64(s[40:40+8]) + z = bits.RotateLeft64(z+w.lo, -33) * mul + v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo) + w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8])) + return uoH(hashLen16Mul(v.lo+x, w.lo^y, mul)+z-u, + uoH(v.hi+y, w.hi+z, k2, 30)^x, + k2, + 31) +} + +// Hash64WithSeed hashes a byte slice and a uint64 seed and returns a uint64 hash value +func Hash64WithSeed(s []byte, seed uint64) uint64 { + if len(s) <= 64 { + return naHash64WithSeed(s, seed) + } + return Hash64WithSeeds(s, 0, seed) +} + +// Hash64 hashes a byte slice and returns a uint64 hash value +func Hash64(s []byte) uint64 { + if len(s) <= 64 { + return naHash64(s) + } + return Hash64WithSeeds(s, 81, 0) +} diff --git a/vendor/github.com/dgryski/go-farm/fp_amd64.s b/vendor/github.com/dgryski/go-farm/fp_amd64.s new file mode 100644 index 00000000..2b8fa324 --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/fp_amd64.s @@ -0,0 +1,951 @@ +// Code generated by command: go run asm.go -out=fp_amd64.s -go111=false. DO NOT EDIT. + +// +build amd64,!purego + +#include "textflag.h" + +// func Fingerprint64(s []byte) uint64 +TEXT ·Fingerprint64(SB), NOSPLIT, $0-32 + MOVQ s_base+0(FP), CX + MOVQ s_len+8(FP), AX + CMPQ AX, $0x10 + JG check32 + CMPQ AX, $0x08 + JL check4 + MOVQ (CX), DX + MOVQ AX, BX + SUBQ $0x08, BX + ADDQ CX, BX + MOVQ (BX), BX + MOVQ $0x9ae16a3b2f90404f, BP + ADDQ BP, DX + SHLQ $0x01, AX + ADDQ BP, AX + MOVQ BX, BP + RORQ $0x25, BP + IMULQ AX, BP + ADDQ DX, BP + RORQ $0x19, DX + ADDQ BX, DX + IMULQ AX, DX + XORQ DX, BP + IMULQ AX, BP + MOVQ BP, BX + SHRQ $0x2f, BX + XORQ BP, BX + XORQ BX, DX + IMULQ AX, DX + MOVQ DX, BX + SHRQ $0x2f, BX + XORQ DX, BX + IMULQ AX, BX + MOVQ BX, ret+24(FP) + RET + +check4: + CMPQ AX, $0x04 + JL check0 + MOVQ $0x9ae16a3b2f90404f, DX + MOVQ AX, BX + SHLQ $0x01, BX + ADDQ DX, BX + MOVL (CX), SI + SHLQ $0x03, SI + ADDQ AX, SI + SUBQ $0x04, AX + ADDQ AX, CX + MOVL (CX), DI + XORQ DI, SI + IMULQ BX, SI + MOVQ SI, DX + SHRQ $0x2f, DX + XORQ SI, DX + XORQ DX, DI + IMULQ BX, DI + MOVQ DI, DX + SHRQ $0x2f, DX + XORQ DI, DX + IMULQ BX, DX + MOVQ DX, ret+24(FP) + RET + +check0: + TESTQ AX, AX + JZ empty + MOVBQZX (CX), DX + MOVQ AX, BX + SHRQ $0x01, BX + ADDQ CX, BX + MOVBQZX (BX), BP + MOVQ AX, BX + SUBQ $0x01, BX + ADDQ CX, BX + MOVBQZX (BX), BX + SHLQ $0x08, BP + ADDQ BP, DX + SHLQ $0x02, BX + ADDQ BX, AX + MOVQ $0xc3a5c85c97cb3127, BX + IMULQ BX, AX + MOVQ $0x9ae16a3b2f90404f, BX + IMULQ BX, DX + XORQ DX, AX + MOVQ AX, DX + SHRQ $0x2f, DX + XORQ AX, DX + IMULQ BX, DX + MOVQ DX, ret+24(FP) + RET + +empty: + MOVQ $0x9ae16a3b2f90404f, DX + MOVQ DX, ret+24(FP) + RET + +check32: + CMPQ AX, $0x20 + JG check64 + MOVQ AX, DX + SHLQ $0x01, DX + MOVQ $0x9ae16a3b2f90404f, BX + ADDQ BX, DX + MOVQ (CX), BP + MOVQ $0xb492b66fbe98f273, SI + IMULQ SI, BP + MOVQ 8(CX), SI + MOVQ AX, DI + SUBQ $0x10, DI + ADDQ CX, DI + MOVQ 8(DI), R12 + IMULQ DX, R12 + MOVQ (DI), DI + IMULQ BX, DI + MOVQ BP, R13 + ADDQ SI, R13 + RORQ $0x2b, R13 + ADDQ DI, R13 + MOVQ R12, DI + RORQ $0x1e, DI + ADDQ DI, R13 + ADDQ R12, BP + ADDQ BX, SI + RORQ $0x12, SI + ADDQ SI, BP + XORQ BP, R13 + IMULQ DX, R13 + MOVQ R13, BX + SHRQ 
$0x2f, BX + XORQ R13, BX + XORQ BX, BP + IMULQ DX, BP + MOVQ BP, BX + SHRQ $0x2f, BX + XORQ BP, BX + IMULQ DX, BX + MOVQ BX, ret+24(FP) + RET + +check64: + CMPQ AX, $0x40 + JG long + MOVQ AX, DX + SHLQ $0x01, DX + MOVQ $0x9ae16a3b2f90404f, BX + ADDQ BX, DX + MOVQ (CX), BP + IMULQ BX, BP + MOVQ 8(CX), SI + MOVQ AX, DI + SUBQ $0x10, DI + ADDQ CX, DI + MOVQ 8(DI), R12 + IMULQ DX, R12 + MOVQ (DI), DI + IMULQ BX, DI + MOVQ BP, R13 + ADDQ SI, R13 + RORQ $0x2b, R13 + ADDQ DI, R13 + MOVQ R12, DI + RORQ $0x1e, DI + ADDQ DI, R13 + ADDQ BP, R12 + ADDQ BX, SI + RORQ $0x12, SI + ADDQ SI, R12 + MOVQ R13, BX + XORQ R12, BX + IMULQ DX, BX + MOVQ BX, SI + SHRQ $0x2f, SI + XORQ BX, SI + XORQ SI, R12 + IMULQ DX, R12 + MOVQ R12, BX + SHRQ $0x2f, BX + XORQ R12, BX + IMULQ DX, BX + MOVQ 16(CX), SI + IMULQ DX, SI + MOVQ 24(CX), DI + MOVQ AX, R12 + SUBQ $0x20, R12 + ADDQ CX, R12 + MOVQ (R12), R14 + ADDQ R13, R14 + IMULQ DX, R14 + MOVQ 8(R12), R12 + ADDQ BX, R12 + IMULQ DX, R12 + MOVQ SI, BX + ADDQ DI, BX + RORQ $0x2b, BX + ADDQ R12, BX + MOVQ R14, R12 + RORQ $0x1e, R12 + ADDQ R12, BX + ADDQ R14, SI + ADDQ BP, DI + RORQ $0x12, DI + ADDQ DI, SI + XORQ SI, BX + IMULQ DX, BX + MOVQ BX, BP + SHRQ $0x2f, BP + XORQ BX, BP + XORQ BP, SI + IMULQ DX, SI + MOVQ SI, BX + SHRQ $0x2f, BX + XORQ SI, BX + IMULQ DX, BX + MOVQ BX, ret+24(FP) + RET + +long: + XORQ R8, R8 + XORQ R9, R9 + XORQ R10, R10 + XORQ R11, R11 + MOVQ $0x01529cba0ca458ff, DX + ADDQ (CX), DX + MOVQ $0x226bb95b4e64b6d4, BX + MOVQ $0x134a747f856d0526, BP + MOVQ AX, SI + SUBQ $0x01, SI + MOVQ $0xffffffffffffffc0, DI + ANDQ DI, SI + MOVQ AX, DI + SUBQ $0x01, DI + ANDQ $0x3f, DI + SUBQ $0x3f, DI + ADDQ SI, DI + MOVQ DI, SI + ADDQ CX, SI + MOVQ AX, DI + +loop: + MOVQ $0xb492b66fbe98f273, R12 + ADDQ BX, DX + ADDQ R8, DX + ADDQ 8(CX), DX + RORQ $0x25, DX + IMULQ R12, DX + ADDQ R9, BX + ADDQ 48(CX), BX + RORQ $0x2a, BX + IMULQ R12, BX + XORQ R11, DX + ADDQ R8, BX + ADDQ 40(CX), BX + ADDQ R10, BP + RORQ $0x21, BP + IMULQ R12, BP + IMULQ R12, R9 + MOVQ DX, R8 + ADDQ R10, R8 + ADDQ (CX), R9 + ADDQ R9, R8 + ADDQ 24(CX), R8 + RORQ $0x15, R8 + MOVQ R9, R10 + ADDQ 8(CX), R9 + ADDQ 16(CX), R9 + MOVQ R9, R13 + RORQ $0x2c, R13 + ADDQ R13, R8 + ADDQ 24(CX), R9 + ADDQ R10, R8 + XCHGQ R9, R8 + ADDQ BP, R11 + MOVQ BX, R10 + ADDQ 16(CX), R10 + ADDQ 32(CX), R11 + ADDQ R11, R10 + ADDQ 56(CX), R10 + RORQ $0x15, R10 + MOVQ R11, R13 + ADDQ 40(CX), R11 + ADDQ 48(CX), R11 + MOVQ R11, R14 + RORQ $0x2c, R14 + ADDQ R14, R10 + ADDQ 56(CX), R11 + ADDQ R13, R10 + XCHGQ R11, R10 + XCHGQ BP, DX + ADDQ $0x40, CX + SUBQ $0x40, DI + CMPQ DI, $0x40 + JG loop + MOVQ SI, CX + MOVQ BP, DI + ANDQ $0xff, DI + SHLQ $0x01, DI + ADDQ R12, DI + MOVQ SI, CX + SUBQ $0x01, AX + ANDQ $0x3f, AX + ADDQ AX, R10 + ADDQ R10, R8 + ADDQ R8, R10 + ADDQ BX, DX + ADDQ R8, DX + ADDQ 8(CX), DX + RORQ $0x25, DX + IMULQ DI, DX + ADDQ R9, BX + ADDQ 48(CX), BX + RORQ $0x2a, BX + IMULQ DI, BX + MOVQ $0x00000009, AX + IMULQ R11, AX + XORQ AX, DX + MOVQ $0x00000009, AX + IMULQ R8, AX + ADDQ AX, BX + ADDQ 40(CX), BX + ADDQ R10, BP + RORQ $0x21, BP + IMULQ DI, BP + IMULQ DI, R9 + MOVQ DX, R8 + ADDQ R10, R8 + ADDQ (CX), R9 + ADDQ R9, R8 + ADDQ 24(CX), R8 + RORQ $0x15, R8 + MOVQ R9, AX + ADDQ 8(CX), R9 + ADDQ 16(CX), R9 + MOVQ R9, SI + RORQ $0x2c, SI + ADDQ SI, R8 + ADDQ 24(CX), R9 + ADDQ AX, R8 + XCHGQ R9, R8 + ADDQ BP, R11 + MOVQ BX, R10 + ADDQ 16(CX), R10 + ADDQ 32(CX), R11 + ADDQ R11, R10 + ADDQ 56(CX), R10 + RORQ $0x15, R10 + MOVQ R11, AX + ADDQ 40(CX), R11 + ADDQ 48(CX), R11 + MOVQ R11, SI + RORQ $0x2c, SI + ADDQ SI, R10 + ADDQ 
56(CX), R11 + ADDQ AX, R10 + XCHGQ R11, R10 + XCHGQ BP, DX + XORQ R10, R8 + IMULQ DI, R8 + MOVQ R8, AX + SHRQ $0x2f, AX + XORQ R8, AX + XORQ AX, R10 + IMULQ DI, R10 + MOVQ R10, AX + SHRQ $0x2f, AX + XORQ R10, AX + IMULQ DI, AX + ADDQ BP, AX + MOVQ BX, CX + SHRQ $0x2f, CX + XORQ BX, CX + MOVQ $0xc3a5c85c97cb3127, BX + IMULQ BX, CX + ADDQ CX, AX + XORQ R11, R9 + IMULQ DI, R9 + MOVQ R9, CX + SHRQ $0x2f, CX + XORQ R9, CX + XORQ CX, R11 + IMULQ DI, R11 + MOVQ R11, CX + SHRQ $0x2f, CX + XORQ R11, CX + IMULQ DI, CX + ADDQ DX, CX + XORQ CX, AX + IMULQ DI, AX + MOVQ AX, DX + SHRQ $0x2f, DX + XORQ AX, DX + XORQ DX, CX + IMULQ DI, CX + MOVQ CX, AX + SHRQ $0x2f, AX + XORQ CX, AX + IMULQ DI, AX + MOVQ AX, ret+24(FP) + RET + +// func Fingerprint32(s []byte) uint32 +TEXT ·Fingerprint32(SB), NOSPLIT, $0-28 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + CMPQ CX, $0x18 + JG long + CMPQ CX, $0x0c + JG hash_13_24 + CMPQ CX, $0x04 + JG hash_5_12 + XORL DX, DX + MOVL $0x00000009, BX + TESTQ CX, CX + JZ done + MOVQ CX, BP + MOVL $0xcc9e2d51, DI + IMULL DI, DX + MOVBLSX (AX), SI + ADDL SI, DX + XORL DX, BX + SUBQ $0x01, BP + TESTQ BP, BP + JZ done + IMULL DI, DX + MOVBLSX 1(AX), SI + ADDL SI, DX + XORL DX, BX + SUBQ $0x01, BP + TESTQ BP, BP + JZ done + IMULL DI, DX + MOVBLSX 2(AX), SI + ADDL SI, DX + XORL DX, BX + SUBQ $0x01, BP + TESTQ BP, BP + JZ done + IMULL DI, DX + MOVBLSX 3(AX), SI + ADDL SI, DX + XORL DX, BX + SUBQ $0x01, BP + TESTQ BP, BP + JZ done + +done: + MOVL CX, BP + MOVL $0xcc9e2d51, SI + IMULL SI, BP + RORL $0x11, BP + MOVL $0x1b873593, SI + IMULL SI, BP + XORL BP, BX + RORL $0x13, BX + LEAL (BX)(BX*4), BP + LEAL 3864292196(BP), BX + MOVL $0xcc9e2d51, BP + IMULL BP, DX + RORL $0x11, DX + MOVL $0x1b873593, BP + IMULL BP, DX + XORL DX, BX + RORL $0x13, BX + LEAL (BX)(BX*4), DX + LEAL 3864292196(DX), BX + MOVL BX, DX + SHRL $0x10, DX + XORL DX, BX + MOVL $0x85ebca6b, DX + IMULL DX, BX + MOVL BX, DX + SHRL $0x0d, DX + XORL DX, BX + MOVL $0xc2b2ae35, DX + IMULL DX, BX + MOVL BX, DX + SHRL $0x10, DX + XORL DX, BX + MOVL BX, ret+24(FP) + RET + +hash_5_12: + MOVL CX, DX + MOVL DX, BX + SHLL $0x02, BX + ADDL DX, BX + MOVL $0x00000009, BP + MOVL BX, SI + ADDL (AX), DX + MOVQ CX, DI + SUBQ $0x04, DI + ADDQ AX, DI + ADDL (DI), BX + MOVQ CX, DI + SHRQ $0x01, DI + ANDQ $0x04, DI + ADDQ AX, DI + ADDL (DI), BP + MOVL $0xcc9e2d51, DI + IMULL DI, DX + RORL $0x11, DX + MOVL $0x1b873593, DI + IMULL DI, DX + XORL DX, SI + RORL $0x13, SI + LEAL (SI)(SI*4), DX + LEAL 3864292196(DX), SI + MOVL $0xcc9e2d51, DX + IMULL DX, BX + RORL $0x11, BX + MOVL $0x1b873593, DX + IMULL DX, BX + XORL BX, SI + RORL $0x13, SI + LEAL (SI)(SI*4), BX + LEAL 3864292196(BX), SI + MOVL $0xcc9e2d51, DX + IMULL DX, BP + RORL $0x11, BP + MOVL $0x1b873593, DX + IMULL DX, BP + XORL BP, SI + RORL $0x13, SI + LEAL (SI)(SI*4), BP + LEAL 3864292196(BP), SI + MOVL SI, DX + SHRL $0x10, DX + XORL DX, SI + MOVL $0x85ebca6b, DX + IMULL DX, SI + MOVL SI, DX + SHRL $0x0d, DX + XORL DX, SI + MOVL $0xc2b2ae35, DX + IMULL DX, SI + MOVL SI, DX + SHRL $0x10, DX + XORL DX, SI + MOVL SI, ret+24(FP) + RET + +hash_13_24: + MOVQ CX, DX + SHRQ $0x01, DX + ADDQ AX, DX + MOVL -4(DX), BX + MOVL 4(AX), BP + MOVQ CX, SI + ADDQ AX, SI + MOVL -8(SI), DI + MOVL (DX), DX + MOVL (AX), R8 + MOVL -4(SI), SI + MOVL $0xcc9e2d51, R9 + IMULL DX, R9 + ADDL CX, R9 + RORL $0x0c, BX + ADDL SI, BX + MOVL DI, R10 + MOVL $0xcc9e2d51, R11 + IMULL R11, R10 + RORL $0x11, R10 + MOVL $0x1b873593, R11 + IMULL R11, R10 + XORL R10, R9 + RORL $0x13, R9 + LEAL (R9)(R9*4), R10 + LEAL 3864292196(R10), 
R9 + ADDL BX, R9 + RORL $0x03, BX + ADDL DI, BX + MOVL $0xcc9e2d51, DI + IMULL DI, R8 + RORL $0x11, R8 + MOVL $0x1b873593, DI + IMULL DI, R8 + XORL R8, R9 + RORL $0x13, R9 + LEAL (R9)(R9*4), R8 + LEAL 3864292196(R8), R9 + ADDL BX, R9 + ADDL SI, BX + RORL $0x0c, BX + ADDL DX, BX + MOVL $0xcc9e2d51, DX + IMULL DX, BP + RORL $0x11, BP + MOVL $0x1b873593, DX + IMULL DX, BP + XORL BP, R9 + RORL $0x13, R9 + LEAL (R9)(R9*4), BP + LEAL 3864292196(BP), R9 + ADDL BX, R9 + MOVL R9, DX + SHRL $0x10, DX + XORL DX, R9 + MOVL $0x85ebca6b, DX + IMULL DX, R9 + MOVL R9, DX + SHRL $0x0d, DX + XORL DX, R9 + MOVL $0xc2b2ae35, DX + IMULL DX, R9 + MOVL R9, DX + SHRL $0x10, DX + XORL DX, R9 + MOVL R9, ret+24(FP) + RET + +long: + MOVL CX, DX + MOVL $0xcc9e2d51, BX + IMULL DX, BX + MOVL BX, BP + MOVQ CX, SI + ADDQ AX, SI + MOVL $0xcc9e2d51, DI + MOVL $0x1b873593, R8 + MOVL -4(SI), R9 + IMULL DI, R9 + RORL $0x11, R9 + IMULL R8, R9 + XORL R9, DX + RORL $0x13, DX + MOVL DX, R9 + SHLL $0x02, R9 + ADDL R9, DX + ADDL $0xe6546b64, DX + MOVL -8(SI), R9 + IMULL DI, R9 + RORL $0x11, R9 + IMULL R8, R9 + XORL R9, BX + RORL $0x13, BX + MOVL BX, R9 + SHLL $0x02, R9 + ADDL R9, BX + ADDL $0xe6546b64, BX + MOVL -16(SI), R9 + IMULL DI, R9 + RORL $0x11, R9 + IMULL R8, R9 + XORL R9, DX + RORL $0x13, DX + MOVL DX, R9 + SHLL $0x02, R9 + ADDL R9, DX + ADDL $0xe6546b64, DX + MOVL -12(SI), R9 + IMULL DI, R9 + RORL $0x11, R9 + IMULL R8, R9 + XORL R9, BX + RORL $0x13, BX + MOVL BX, R9 + SHLL $0x02, R9 + ADDL R9, BX + ADDL $0xe6546b64, BX + PREFETCHT0 (AX) + MOVL -20(SI), SI + IMULL DI, SI + RORL $0x11, SI + IMULL R8, SI + ADDL SI, BP + RORL $0x13, BP + ADDL $0x71, BP + +loop80: + CMPQ CX, $0x64 + JL loop20 + PREFETCHT0 20(AX) + MOVL (AX), SI + ADDL SI, DX + MOVL 4(AX), DI + ADDL DI, BX + MOVL 8(AX), R8 + ADDL R8, BP + MOVL 12(AX), R9 + MOVL R9, R11 + MOVL $0xcc9e2d51, R10 + IMULL R10, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R10 + IMULL R10, R11 + XORL R11, DX + RORL $0x13, DX + LEAL (DX)(DX*4), R11 + LEAL 3864292196(R11), DX + MOVL 16(AX), R10 + ADDL R10, DX + MOVL R8, R11 + MOVL $0xcc9e2d51, R8 + IMULL R8, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R8 + IMULL R8, R11 + XORL R11, BX + RORL $0x13, BX + LEAL (BX)(BX*4), R11 + LEAL 3864292196(R11), BX + ADDL SI, BX + MOVL $0xcc9e2d51, SI + IMULL SI, R10 + MOVL R10, R11 + ADDL DI, R11 + MOVL $0xcc9e2d51, SI + IMULL SI, R11 + RORL $0x11, R11 + MOVL $0x1b873593, SI + IMULL SI, R11 + XORL R11, BP + RORL $0x13, BP + LEAL (BP)(BP*4), R11 + LEAL 3864292196(R11), BP + ADDL R9, BP + ADDL BX, BP + ADDL BP, BX + PREFETCHT0 40(AX) + MOVL 20(AX), SI + ADDL SI, DX + MOVL 24(AX), DI + ADDL DI, BX + MOVL 28(AX), R8 + ADDL R8, BP + MOVL 32(AX), R9 + MOVL R9, R11 + MOVL $0xcc9e2d51, R10 + IMULL R10, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R10 + IMULL R10, R11 + XORL R11, DX + RORL $0x13, DX + LEAL (DX)(DX*4), R11 + LEAL 3864292196(R11), DX + MOVL 36(AX), R10 + ADDL R10, DX + MOVL R8, R11 + MOVL $0xcc9e2d51, R8 + IMULL R8, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R8 + IMULL R8, R11 + XORL R11, BX + RORL $0x13, BX + LEAL (BX)(BX*4), R11 + LEAL 3864292196(R11), BX + ADDL SI, BX + MOVL $0xcc9e2d51, SI + IMULL SI, R10 + MOVL R10, R11 + ADDL DI, R11 + MOVL $0xcc9e2d51, SI + IMULL SI, R11 + RORL $0x11, R11 + MOVL $0x1b873593, SI + IMULL SI, R11 + XORL R11, BP + RORL $0x13, BP + LEAL (BP)(BP*4), R11 + LEAL 3864292196(R11), BP + ADDL R9, BP + ADDL BX, BP + ADDL BP, BX + PREFETCHT0 60(AX) + MOVL 40(AX), SI + ADDL SI, DX + MOVL 44(AX), DI + ADDL DI, BX + MOVL 48(AX), R8 + ADDL R8, BP + MOVL 52(AX), R9 + MOVL R9, R11 + 
MOVL $0xcc9e2d51, R10 + IMULL R10, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R10 + IMULL R10, R11 + XORL R11, DX + RORL $0x13, DX + LEAL (DX)(DX*4), R11 + LEAL 3864292196(R11), DX + MOVL 56(AX), R10 + ADDL R10, DX + MOVL R8, R11 + MOVL $0xcc9e2d51, R8 + IMULL R8, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R8 + IMULL R8, R11 + XORL R11, BX + RORL $0x13, BX + LEAL (BX)(BX*4), R11 + LEAL 3864292196(R11), BX + ADDL SI, BX + MOVL $0xcc9e2d51, SI + IMULL SI, R10 + MOVL R10, R11 + ADDL DI, R11 + MOVL $0xcc9e2d51, SI + IMULL SI, R11 + RORL $0x11, R11 + MOVL $0x1b873593, SI + IMULL SI, R11 + XORL R11, BP + RORL $0x13, BP + LEAL (BP)(BP*4), R11 + LEAL 3864292196(R11), BP + ADDL R9, BP + ADDL BX, BP + ADDL BP, BX + PREFETCHT0 80(AX) + MOVL 60(AX), SI + ADDL SI, DX + MOVL 64(AX), DI + ADDL DI, BX + MOVL 68(AX), R8 + ADDL R8, BP + MOVL 72(AX), R9 + MOVL R9, R11 + MOVL $0xcc9e2d51, R10 + IMULL R10, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R10 + IMULL R10, R11 + XORL R11, DX + RORL $0x13, DX + LEAL (DX)(DX*4), R11 + LEAL 3864292196(R11), DX + MOVL 76(AX), R10 + ADDL R10, DX + MOVL R8, R11 + MOVL $0xcc9e2d51, R8 + IMULL R8, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R8 + IMULL R8, R11 + XORL R11, BX + RORL $0x13, BX + LEAL (BX)(BX*4), R11 + LEAL 3864292196(R11), BX + ADDL SI, BX + MOVL $0xcc9e2d51, SI + IMULL SI, R10 + MOVL R10, R11 + ADDL DI, R11 + MOVL $0xcc9e2d51, SI + IMULL SI, R11 + RORL $0x11, R11 + MOVL $0x1b873593, SI + IMULL SI, R11 + XORL R11, BP + RORL $0x13, BP + LEAL (BP)(BP*4), R11 + LEAL 3864292196(R11), BP + ADDL R9, BP + ADDL BX, BP + ADDL BP, BX + ADDQ $0x50, AX + SUBQ $0x50, CX + JMP loop80 + +loop20: + CMPQ CX, $0x14 + JLE after + MOVL (AX), SI + ADDL SI, DX + MOVL 4(AX), DI + ADDL DI, BX + MOVL 8(AX), R8 + ADDL R8, BP + MOVL 12(AX), R9 + MOVL R9, R11 + MOVL $0xcc9e2d51, R10 + IMULL R10, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R10 + IMULL R10, R11 + XORL R11, DX + RORL $0x13, DX + LEAL (DX)(DX*4), R11 + LEAL 3864292196(R11), DX + MOVL 16(AX), R10 + ADDL R10, DX + MOVL R8, R11 + MOVL $0xcc9e2d51, R8 + IMULL R8, R11 + RORL $0x11, R11 + MOVL $0x1b873593, R8 + IMULL R8, R11 + XORL R11, BX + RORL $0x13, BX + LEAL (BX)(BX*4), R11 + LEAL 3864292196(R11), BX + ADDL SI, BX + MOVL $0xcc9e2d51, SI + IMULL SI, R10 + MOVL R10, R11 + ADDL DI, R11 + MOVL $0xcc9e2d51, SI + IMULL SI, R11 + RORL $0x11, R11 + MOVL $0x1b873593, SI + IMULL SI, R11 + XORL R11, BP + RORL $0x13, BP + LEAL (BP)(BP*4), R11 + LEAL 3864292196(R11), BP + ADDL R9, BP + ADDL BX, BP + ADDL BP, BX + ADDQ $0x14, AX + SUBQ $0x14, CX + JMP loop20 + +after: + MOVL $0xcc9e2d51, AX + RORL $0x0b, BX + IMULL AX, BX + RORL $0x11, BX + IMULL AX, BX + RORL $0x0b, BP + IMULL AX, BP + RORL $0x11, BP + IMULL AX, BP + ADDL BX, DX + RORL $0x13, DX + MOVL DX, CX + SHLL $0x02, CX + ADDL CX, DX + ADDL $0xe6546b64, DX + RORL $0x11, DX + IMULL AX, DX + ADDL BP, DX + RORL $0x13, DX + MOVL DX, CX + SHLL $0x02, CX + ADDL CX, DX + ADDL $0xe6546b64, DX + RORL $0x11, DX + IMULL AX, DX + MOVL DX, ret+24(FP) + RET diff --git a/vendor/github.com/dgryski/go-farm/fp_generic.go b/vendor/github.com/dgryski/go-farm/fp_generic.go new file mode 100644 index 00000000..2cfa1b9d --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/fp_generic.go @@ -0,0 +1,13 @@ +// +build !amd64 purego + +package farm + +// Fingerprint64 is a 64-bit fingerprint function for byte-slices +func Fingerprint64(s []byte) uint64 { + return naHash64(s) +} + +// Fingerprint32 is a 32-bit fingerprint function for byte-slices +func Fingerprint32(s []byte) uint32 { + return Hash32(s) +} diff --git 
a/vendor/github.com/dgryski/go-farm/fp_stub.go b/vendor/github.com/dgryski/go-farm/fp_stub.go new file mode 100644 index 00000000..94fff8de --- /dev/null +++ b/vendor/github.com/dgryski/go-farm/fp_stub.go @@ -0,0 +1,9 @@ +// Code generated by command: go run asm.go -out=fp_amd64.s -stubs=fp_stub.go. DO NOT EDIT. + +// +build amd64,!purego + +package farm + +func Fingerprint64(s []byte) uint64 + +func Fingerprint32(s []byte) uint32 diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 00000000..ba95cdd1 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go: + - 1.3.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 00000000..8d9a94a9 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 00000000..91b4ae56 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. + +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. 
+``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +## English-specific functions + +The following functions are in the `humanize/english` subpackage. + +### Plurals + +Simple English pluralization + +```go +english.PluralWord(1, "object", "") // object +english.PluralWord(42, "object", "") // objects +english.PluralWord(2, "bus", "") // buses +english.PluralWord(99, "locus", "loci") // loci + +english.Plural(1, "object", "") // 1 object +english.Plural(42, "object", "") // 42 objects +english.Plural(2, "bus", "") // 2 buses +english.Plural(99, "locus", "loci") // 99 loci +``` + +### Word series + +Format comma-separated words lists with conjuctions: + +```go +english.WordSeries([]string{"foo"}, "and") // foo +english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar +english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz + +english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 00000000..f49dc337 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 00000000..1a2bf617 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,173 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( 
+ bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. 
+// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 00000000..0b498f48 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. +const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 00000000..520ae3e5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. 
+func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 00000000..620690de --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,40 @@ +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 00000000..1c62b640 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,46 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 00000000..a2c2da31 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). 
+*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 00000000..dec61865 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. + +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." => "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < -math.MaxFloat64 { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." 
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 00000000..43d88a86 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 00000000..ae659e0e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,123 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 00000000..dd3fbf5e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. +// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. 
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D > diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go index 39375a2b..bb84bf27 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go @@ -143,6 +143,10 @@ var ( utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, utils.StateDiffFlag, + utils.StateDiffPathsAndProofs, + utils.StateDiffAllNodeTypes, + utils.StateDiffStreamBlock, + utils.StateDiffWatchedAddresses, configFileFlag, } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go index e7a9e1fb..4318c5ba 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go @@ -249,6 +249,10 @@ var AppHelpFlagGroups = []flagGroup{ Name: "STATE DIFF", Flags: []cli.Flag{ utils.StateDiffFlag, + utils.StateDiffPathsAndProofs, + utils.StateDiffAllNodeTypes, + utils.StateDiffWatchedAddresses, + utils.StateDiffStreamBlock, }, }, { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go index e4bb18a8..b26dc035 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go @@ -714,6 +714,22 @@ var ( Name: "statediff", Usage: "Enables the calculation of state diffs between each block, persists these state diffs the configured persistence mode.", } + StateDiffPathsAndProofs = cli.BoolFlag{ + Name: "statediff.pathsandproofs", + Usage: "Set to true to generate paths and proof sets for diffed state and storage trie lead nodes", + } + StateDiffAllNodeTypes = cli.BoolFlag{ + Name: "statediff.allnodes", + Usage: "Set to true to consider all node types: leaf, branch, and extension; default (false) processes leaf nodes only", + } + StateDiffWatchedAddresses = cli.StringSliceFlag{ + Name: "statediff.watchedaddresses", + Usage: "If provided, state diffing process is restricted to these addresses", + } + StateDiffStreamBlock = cli.BoolFlag{ + Name: "statediff.streamblock", + Usage: "Set to true to stream the block data alongside state diff data", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1528,12 +1544,18 @@ func RegisterEthStatsService(stack *node.Node, url string) { // RegisterStateDiffService configures and registers a service to stream state diff data over RPC func RegisterStateDiffService(stack *node.Node, ctx *cli.Context) { + config := statediff.Config{ + StreamBlock: ctx.GlobalBool(StateDiffStreamBlock.Name), + PathsAndProofs: ctx.GlobalBool(StateDiffPathsAndProofs.Name), + AllNodes: ctx.GlobalBool(StateDiffAllNodeTypes.Name), + WatchedAddresses: ctx.GlobalStringSlice(StateDiffWatchedAddresses.Name), + } if err := 
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { var ethServ *eth.Ethereum ctx.Service(ðServ) chainDb := ethServ.ChainDb() blockChain := ethServ.BlockChain() - return statediff.NewStateDiffService(chainDb, blockChain) + return statediff.NewStateDiffService(chainDb, blockChain, config) }); err != nil { Fatalf("Failed to register State Diff Service", err) } diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go index 6dc88e3d..50530fa4 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go +++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go @@ -1025,16 +1025,19 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. bc.triegc.Push(root, number) break } - if bc.cacheConfig.ProcessingStateDiffs { if !bc.allowedRootToBeDereferenced(root.(common.Hash)) { bc.triegc.Push(root, number) break } else { + log.Debug("Current root found in stateDiffsProcessed collection with a count of 2, okay to dereference", + "root", root.(common.Hash).Hex(), + "blockNumber", uint64(-number), + "size of stateDiffsProcessed", len(bc.stateDiffsProcessed)) delete(bc.stateDiffsProcessed, root.(common.Hash)) } } - + log.Debug("Dereferencing", "root", root.(common.Hash).Hex()) triedb.Dereference(root.(common.Hash)) } } diff --git a/vendor/github.com/ethereum/go-ethereum/statediff/api.go b/vendor/github.com/ethereum/go-ethereum/statediff/api.go index 498c2f75..528fe56a 100644 --- a/vendor/github.com/ethereum/go-ethereum/statediff/api.go +++ b/vendor/github.com/ethereum/go-ethereum/statediff/api.go @@ -44,7 +44,7 @@ func NewPublicStateDiffAPI(sds IService) *PublicStateDiffAPI { } // Subscribe is the public method to setup a subscription that fires off state-diff payloads as they are created -func (api *PublicStateDiffAPI) Subscribe(ctx context.Context) (*rpc.Subscription, error) { +func (api *PublicStateDiffAPI) Subscribe(ctx context.Context, payloadChan chan Payload) (*rpc.Subscription, error) { // ensure that the RPC connection supports subscriptions notifier, supported := rpc.NotifierFromContext(ctx) if !supported { @@ -56,10 +56,9 @@ func (api *PublicStateDiffAPI) Subscribe(ctx context.Context) (*rpc.Subscription go func() { // subscribe to events from the state diff service - payloadChannel := make(chan Payload) + payloadChannel := make(chan Payload, 10) quitChan := make(chan bool) api.sds.Subscribe(rpcSub.ID, payloadChannel, quitChan) - // loop and await state diff payloads and relay them to the subscriber with then notifier for { select { @@ -67,8 +66,11 @@ func (api *PublicStateDiffAPI) Subscribe(ctx context.Context) (*rpc.Subscription if err := notifier.Notify(rpcSub.ID, packet); err != nil { log.Error("Failed to send state diff packet", "err", err) } - case <-rpcSub.Err(): - err := api.sds.Unsubscribe(rpcSub.ID) + case err := <-rpcSub.Err(): + log.Error("State diff service rpcSub error", err) + println("err") + println(err.Error()) + err = api.sds.Unsubscribe(rpcSub.ID) if err != nil { log.Error("Failed to unsubscribe from the state diff service", err) } diff --git a/vendor/github.com/ethereum/go-ethereum/statediff/builder_test.go b/vendor/github.com/ethereum/go-ethereum/statediff/builder_test.go deleted file mode 100644 index 7575b060..00000000 --- a/vendor/github.com/ethereum/go-ethereum/statediff/builder_test.go +++ /dev/null @@ -1,564 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package statediff_test - -import ( - "bytes" - "math/big" - "sort" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers" -) - -var ( - contractLeafKey common.Hash - emptyAccountDiffEventualMap = make([]statediff.AccountDiff, 0) - emptyAccountDiffIncrementalMap = make([]statediff.AccountDiff, 0) - block0, block1, block2, block3 *types.Block - builder statediff.Builder - miningReward = int64(2000000000000000000) - burnAddress = common.HexToAddress("0x0") - burnLeafKey = testhelpers.AddressToLeafKey(burnAddress) - - block0Hash = common.HexToHash("0xd1721cfd0b29c36fd7a68f25c128e86413fb666a6e1d68e89b875bd299262661") - block1Hash = common.HexToHash("0xbbe88de60ba33a3f18c0caa37d827bfb70252e19e40a07cd34041696c35ecb1a") - block2Hash = common.HexToHash("0x34ad0fd9bb2911986b75d518c822641079dea823bc6952343ebf05da1062b6f5") - block3Hash = common.HexToHash("0x9872058136c560a6ebed0c0522b8d3016fc21f4fb0fb6585ddd8fd4c54f9909a") - balanceChange10000 = int64(10000) - balanceChange1000 = int64(1000) - block1BankBalance = int64(99990000) - block1Account1Balance = int64(10000) - block2Account2Balance = int64(1000) - nonce0 = uint64(0) - nonce1 = uint64(1) - nonce2 = uint64(2) - nonce3 = uint64(3) - originalContractRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - contractContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0" - newContractRoot = "0x71e0d14b2b93e5c7f9748e69e1fe5f17498a1c3ac3cec29f96af13d7f8a4e070" - originalStorageLocation = common.HexToHash("0") - originalStorageKey = crypto.Keccak256Hash(originalStorageLocation[:]).Bytes() - updatedStorageLocation = common.HexToHash("2") - updatedStorageKey = crypto.Keccak256Hash(updatedStorageLocation[:]).Bytes() - originalStorageValue = common.Hex2Bytes("01") - updatedStorageValue = common.Hex2Bytes("03") - - account1, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce0, - Balance: big.NewInt(balanceChange10000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - burnAccount1, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce0, - Balance: big.NewInt(miningReward), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - bankAccount1, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce1, - Balance: big.NewInt(testhelpers.TestBankFunds.Int64() - balanceChange10000), - CodeHash: 
common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - account2, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce0, - Balance: big.NewInt(balanceChange1000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - contractAccount, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce1, - Balance: big.NewInt(0), - CodeHash: common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea").Bytes(), - Root: common.HexToHash(contractContractRoot), - }) - bankAccount2, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce2, - Balance: big.NewInt(block1BankBalance - balanceChange1000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - account3, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce2, - Balance: big.NewInt(block1Account1Balance - balanceChange1000 + balanceChange1000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - burnAccount2, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce0, - Balance: big.NewInt(miningReward + miningReward), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - account4, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce0, - Balance: big.NewInt(block2Account2Balance + miningReward), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) - contractAccount2, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce1, - Balance: big.NewInt(0), - CodeHash: common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea").Bytes(), - Root: common.HexToHash(newContractRoot), - }) - bankAccount3, _ = rlp.EncodeToBytes(state.Account{ - Nonce: nonce3, - Balance: big.NewInt(99989000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash(originalContractRoot), - }) -) - -type arguments struct { - oldStateRoot common.Hash - newStateRoot common.Hash - blockNumber *big.Int - blockHash common.Hash -} - -func TestBuilder(t *testing.T) { - _, blockMap, chain := testhelpers.MakeChain(3, testhelpers.Genesis) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) - defer chain.Stop() - block0 = blockMap[block0Hash] - block1 = blockMap[block1Hash] - block2 = blockMap[block2Hash] - block3 = blockMap[block3Hash] - config := statediff.Config{ - PathsAndProofs: true, - AllNodes: false, - } - builder = statediff.NewBuilder(testhelpers.Testdb, chain, config) - - var tests = []struct { - name string - startingArguments arguments - expected *statediff.StateDiff - }{ - { - "testEmptyDiff", - arguments{ - oldStateRoot: block0.Root(), - newStateRoot: block0.Root(), - blockNumber: block0.Number(), - blockHash: block0Hash, - }, - &statediff.StateDiff{ - BlockNumber: block0.Number(), - BlockHash: block0Hash, - CreatedAccounts: emptyAccountDiffEventualMap, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: emptyAccountDiffIncrementalMap, - }, - }, - { - "testBlock1", - //10000 transferred from testBankAddress 
to account1Addr - arguments{ - oldStateRoot: block0.Root(), - newStateRoot: block1.Root(), - blockNumber: block1.Number(), - blockHash: block1Hash, - }, - &statediff.StateDiff{ - BlockNumber: block1.Number(), - BlockHash: block1.Hash(), - CreatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: burnLeafKey.Bytes(), - Value: burnAccount1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 113, 160, 51, 128, 199, 183, 174, 129, 165, 142, 185, 141, 156, 120, 222, 74, 31, 215, 253, 149, 53, 252, 149, 62, 210, 190, 96, 45, 170, 164, 23, 103, 49, 42, 184, 78, 248, 76, 128, 136, 27, 193, 109, 103, 78, 200, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{5, 3, 8, 0, 12, 7, 11, 7, 10, 14, 8, 1, 10, 5, 8, 14, 11, 9, 8, 13, 9, 12, 7, 8, 13, 14, 4, 10, 1, 15, 13, 7, 15, 13, 9, 5, 3, 5, 15, 12, 9, 5, 3, 14, 13, 2, 11, 14, 6, 0, 2, 13, 10, 10, 10, 4, 1, 7, 6, 7, 3, 1, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - { - Leaf: true, - Key: testhelpers.Account1LeafKey.Bytes(), - Value: account1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 107, 160, 57, 38, 219, 105, 170, 206, 213, 24, 233, 185, 240, 244, 52, 164, 115, 231, 23, 65, 9, 201, 67, 84, 139, 184, 242, 59, 228, 28, 167, 109, 154, 210, 184, 72, 248, 70, 128, 130, 39, 16, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{14, 9, 2, 6, 13, 11, 6, 9, 10, 10, 12, 14, 13, 5, 1, 8, 14, 9, 11, 9, 15, 0, 15, 4, 3, 4, 10, 4, 7, 3, 14, 7, 1, 7, 4, 1, 0, 9, 12, 9, 4, 3, 5, 4, 8, 11, 11, 8, 15, 2, 3, 11, 14, 4, 1, 12, 10, 7, 6, 13, 9, 10, 13, 2, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: testhelpers.BankLeafKey.Bytes(), - Value: bankAccount1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 
229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 109, 160, 48, 191, 73, 244, 64, 161, 205, 5, 39, 228, 208, 110, 39, 101, 101, 76, 15, 86, 69, 34, 87, 81, 109, 121, 58, 155, 141, 96, 77, 207, 223, 42, 184, 74, 248, 72, 1, 132, 5, 245, 185, 240, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{0, 0, 11, 15, 4, 9, 15, 4, 4, 0, 10, 1, 12, 13, 0, 5, 2, 7, 14, 4, 13, 0, 6, 14, 2, 7, 6, 5, 6, 5, 4, 12, 0, 15, 5, 6, 4, 5, 2, 2, 5, 7, 5, 1, 6, 13, 7, 9, 3, 10, 9, 11, 8, 13, 6, 0, 4, 13, 12, 15, 13, 15, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - }, - }, - { - "testBlock2", - //1000 transferred from testBankAddress to account1Addr - //1000 transferred from account1Addr to account2Addr - arguments{ - oldStateRoot: block1.Root(), - newStateRoot: block2.Root(), - blockNumber: block2.Number(), - blockHash: block2Hash, - }, - &statediff.StateDiff{ - BlockNumber: block2.Number(), - BlockHash: block2.Hash(), - CreatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: contractLeafKey.Bytes(), - Value: contractAccount, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 105, 160, 49, 20, 101, 138, 116, 217, 204, 159, 122, 207, 44, 92, 214, 150, 195, 73, 77, 124, 52, 77, 120, 191, 236, 58, 221, 13, 145, 236, 78, 141, 28, 69, 184, 70, 248, 68, 1, 128, 160, 130, 30, 37, 86, 162, 144, 200, 100, 5, 248, 22, 10, 45, 102, 32, 66, 164, 49, 186, 69, 107, 157, 178, 101, 199, 155, 184, 55, 192, 75, 229, 240, 160, 117, 63, 152, 168, 212, 50, 139, 21, 99, 110, 70, 246, 111, 44, 180, 188, 134, 1, 0, 170, 23, 150, 124, 193, 69, 252, 209, 125, 29, 71, 16, 234}}, - Path: []byte{6, 1, 1, 4, 6, 5, 8, 10, 7, 4, 13, 9, 12, 12, 9, 15, 7, 10, 12, 15, 2, 12, 5, 12, 13, 6, 9, 6, 12, 3, 4, 9, 4, 13, 7, 12, 3, 4, 4, 13, 7, 8, 11, 15, 14, 12, 3, 10, 13, 13, 0, 13, 9, 1, 14, 12, 4, 14, 8, 13, 1, 12, 4, 5, 16}, - Storage: []statediff.StorageDiff{ - { - Leaf: true, - Key: originalStorageKey, - Value: originalStorageValue, - Proof: [][]byte{{227, 161, 32, 41, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}}, - Path: []byte{2, 9, 0, 13, 14, 12, 13, 9, 5, 4, 8, 11, 6, 2, 10, 
8, 13, 6, 0, 3, 4, 5, 10, 9, 8, 8, 3, 8, 6, 15, 12, 8, 4, 11, 10, 6, 11, 12, 9, 5, 4, 8, 4, 0, 0, 8, 15, 6, 3, 6, 2, 15, 9, 3, 1, 6, 0, 14, 15, 3, 14, 5, 6, 3, 16}, - }, - }, - }, - { - Leaf: true, - Key: testhelpers.Account2LeafKey.Bytes(), - Value: account2, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 107, 160, 57, 87, 243, 226, 240, 74, 7, 100, 195, 160, 73, 27, 23, 95, 105, 146, 109, 166, 30, 251, 204, 143, 97, 250, 20, 85, 253, 45, 43, 76, 221, 69, 184, 72, 248, 70, 128, 130, 3, 232, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{12, 9, 5, 7, 15, 3, 14, 2, 15, 0, 4, 10, 0, 7, 6, 4, 12, 3, 10, 0, 4, 9, 1, 11, 1, 7, 5, 15, 6, 9, 9, 2, 6, 13, 10, 6, 1, 14, 15, 11, 12, 12, 8, 15, 6, 1, 15, 10, 1, 4, 5, 5, 15, 13, 2, 13, 2, 11, 4, 12, 13, 13, 4, 5, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: testhelpers.BankLeafKey.Bytes(), - Value: bankAccount2, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 109, 160, 48, 191, 73, 244, 64, 161, 205, 5, 39, 228, 208, 110, 39, 101, 101, 76, 15, 86, 69, 34, 87, 81, 109, 121, 58, 155, 141, 96, 77, 207, 223, 42, 184, 74, 248, 72, 2, 132, 5, 245, 182, 8, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{0, 0, 11, 15, 4, 9, 15, 4, 4, 0, 10, 1, 12, 13, 0, 5, 2, 7, 14, 4, 13, 0, 6, 14, 2, 7, 6, 5, 6, 5, 4, 12, 0, 15, 
5, 6, 4, 5, 2, 2, 5, 7, 5, 1, 6, 13, 7, 9, 3, 10, 9, 11, 8, 13, 6, 0, 4, 13, 12, 15, 13, 15, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - { - Leaf: true, - Key: burnLeafKey.Bytes(), - Value: burnAccount2, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 113, 160, 51, 128, 199, 183, 174, 129, 165, 142, 185, 141, 156, 120, 222, 74, 31, 215, 253, 149, 53, 252, 149, 62, 210, 190, 96, 45, 170, 164, 23, 103, 49, 42, 184, 78, 248, 76, 128, 136, 55, 130, 218, 206, 157, 144, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{5, 3, 8, 0, 12, 7, 11, 7, 10, 14, 8, 1, 10, 5, 8, 14, 11, 9, 8, 13, 9, 12, 7, 8, 13, 14, 4, 10, 1, 15, 13, 7, 15, 13, 9, 5, 3, 5, 15, 12, 9, 5, 3, 14, 13, 2, 11, 14, 6, 0, 2, 13, 10, 10, 10, 4, 1, 7, 6, 7, 3, 1, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - { - Leaf: true, - Key: testhelpers.Account1LeafKey.Bytes(), - Value: account3, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 107, 160, 57, 38, 219, 105, 170, 206, 213, 24, 233, 185, 240, 244, 52, 164, 115, 231, 23, 65, 9, 201, 67, 84, 139, 184, 242, 59, 228, 28, 167, 109, 154, 210, 184, 72, 248, 70, 2, 130, 39, 16, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{14, 9, 2, 6, 13, 11, 6, 9, 10, 10, 12, 14, 13, 5, 1, 8, 14, 9, 11, 9, 15, 0, 15, 4, 3, 4, 10, 4, 7, 3, 14, 7, 1, 7, 4, 1, 0, 9, 12, 9, 4, 3, 5, 4, 8, 11, 11, 8, 15, 2, 3, 11, 14, 4, 1, 12, 10, 7, 6, 13, 9, 10, 13, 2, 
16}, - Storage: []statediff.StorageDiff{}, - }, - }, - }, - }, - { - "testBlock3", - //the contract's storage is changed - //and the block is mined by account 2 - arguments{ - oldStateRoot: block2.Root(), - newStateRoot: block3.Root(), - blockNumber: block3.Number(), - blockHash: block3.Hash(), - }, - &statediff.StateDiff{ - BlockNumber: block3.Number(), - BlockHash: block3.Hash(), - CreatedAccounts: []statediff.AccountDiff{}, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: testhelpers.BankLeafKey.Bytes(), - Value: bankAccount3, - Proof: [][]byte{{248, 177, 160, 101, 223, 138, 81, 34, 40, 229, 170, 198, 188, 136, 99, 7, 55, 33, 112, 160, 111, 181, 131, 167, 201, 131, 24, 201, 211, 177, 30, 159, 229, 246, 6, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 32, 135, 108, 213, 150, 150, 110, 44, 170, 65, 75, 154, 74, 249, 94, 65, 74, 107, 100, 115, 39, 5, 3, 26, 22, 238, 138, 114, 254, 21, 6, 171, 128, 128, 128, 128, 128, 160, 4, 228, 121, 222, 255, 218, 60, 247, 15, 0, 34, 198, 28, 229, 180, 129, 109, 157, 68, 181, 248, 229, 200, 123, 29, 81, 145, 114, 90, 209, 205, 210, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 109, 160, 48, 191, 73, 244, 64, 161, 205, 5, 39, 228, 208, 110, 39, 101, 101, 76, 15, 86, 69, 34, 87, 81, 109, 121, 58, 155, 141, 96, 77, 207, 223, 42, 184, 74, 248, 72, 3, 132, 5, 245, 182, 8, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{0, 0, 11, 15, 4, 9, 15, 4, 4, 0, 10, 1, 12, 13, 0, 5, 2, 7, 14, 4, 13, 0, 6, 14, 2, 7, 6, 5, 6, 5, 4, 12, 0, 15, 5, 6, 4, 5, 2, 2, 5, 7, 5, 1, 6, 13, 7, 9, 3, 10, 9, 11, 8, 13, 6, 0, 4, 13, 12, 15, 13, 15, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - { - Leaf: true, - Key: contractLeafKey.Bytes(), - Value: contractAccount2, - Proof: [][]byte{{248, 177, 160, 101, 223, 138, 81, 34, 40, 229, 170, 198, 188, 136, 99, 7, 55, 33, 112, 160, 111, 181, 131, 167, 201, 131, 24, 201, 211, 177, 30, 159, 229, 246, 6, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 32, 135, 108, 213, 150, 150, 110, 44, 170, 65, 75, 154, 74, 249, 94, 65, 74, 107, 100, 115, 39, 5, 3, 26, 22, 238, 138, 114, 254, 21, 6, 171, 128, 128, 128, 128, 128, 160, 4, 228, 121, 222, 255, 218, 60, 247, 15, 0, 34, 198, 28, 229, 180, 129, 109, 157, 68, 181, 248, 229, 200, 123, 29, 81, 145, 114, 90, 209, 205, 210, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 105, 160, 49, 20, 101, 138, 116, 217, 204, 159, 122, 207, 44, 92, 214, 150, 195, 73, 77, 124, 52, 77, 120, 191, 236, 58, 221, 13, 145, 236, 78, 141, 28, 69, 184, 70, 248, 68, 1, 128, 160, 113, 224, 209, 75, 43, 147, 229, 199, 249, 116, 142, 105, 225, 254, 95, 23, 73, 138, 28, 58, 195, 206, 194, 159, 150, 175, 19, 215, 248, 164, 224, 112, 160, 117, 63, 152, 168, 212, 50, 139, 21, 99, 110, 70, 
246, 111, 44, 180, 188, 134, 1, 0, 170, 23, 150, 124, 193, 69, 252, 209, 125, 29, 71, 16, 234}}, - Path: []byte{6, 1, 1, 4, 6, 5, 8, 10, 7, 4, 13, 9, 12, 12, 9, 15, 7, 10, 12, 15, 2, 12, 5, 12, 13, 6, 9, 6, 12, 3, 4, 9, 4, 13, 7, 12, 3, 4, 4, 13, 7, 8, 11, 15, 14, 12, 3, 10, 13, 13, 0, 13, 9, 1, 14, 12, 4, 14, 8, 13, 1, 12, 4, 5, 16}, - Storage: []statediff.StorageDiff{ - { - Leaf: true, - Key: updatedStorageKey, - Value: updatedStorageValue, - Proof: [][]byte{{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 160, 205, 69, 114, 89, 105, 97, 21, 35, 94, 100, 199, 130, 35, 52, 214, 33, 41, 226, 241, 96, 68, 37, 167, 218, 100, 148, 243, 95, 196, 91, 229, 24, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - {226, 160, 48, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 3}}, - Path: []byte{4, 0, 5, 7, 8, 7, 15, 10, 1, 2, 10, 8, 2, 3, 14, 0, 15, 2, 11, 7, 6, 3, 1, 12, 12, 4, 1, 11, 3, 11, 10, 8, 8, 2, 8, 11, 3, 3, 2, 1, 12, 10, 8, 1, 1, 1, 1, 1, 15, 10, 7, 5, 12, 13, 3, 10, 10, 3, 11, 11, 5, 10, 12, 14, 16}, - }, - }, - }, - { - Leaf: true, - Key: testhelpers.Account2LeafKey.Bytes(), - Value: account4, - Proof: [][]byte{{248, 177, 160, 101, 223, 138, 81, 34, 40, 229, 170, 198, 188, 136, 99, 7, 55, 33, 112, 160, 111, 181, 131, 167, 201, 131, 24, 201, 211, 177, 30, 159, 229, 246, 6, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 32, 135, 108, 213, 150, 150, 110, 44, 170, 65, 75, 154, 74, 249, 94, 65, 74, 107, 100, 115, 39, 5, 3, 26, 22, 238, 138, 114, 254, 21, 6, 171, 128, 128, 128, 128, 128, 160, 4, 228, 121, 222, 255, 218, 60, 247, 15, 0, 34, 198, 28, 229, 180, 129, 109, 157, 68, 181, 248, 229, 200, 123, 29, 81, 145, 114, 90, 209, 205, 210, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 113, 160, 57, 87, 243, 226, 240, 74, 7, 100, 195, 160, 73, 27, 23, 95, 105, 146, 109, 166, 30, 251, 204, 143, 97, 250, 20, 85, 253, 45, 43, 76, 221, 69, 184, 78, 248, 76, 128, 136, 27, 193, 109, 103, 78, 200, 3, 232, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{12, 9, 5, 7, 15, 3, 14, 2, 15, 0, 4, 10, 0, 7, 6, 4, 12, 3, 10, 0, 4, 9, 1, 11, 1, 7, 5, 15, 6, 9, 9, 2, 6, 13, 10, 6, 1, 14, 15, 11, 12, 12, 8, 15, 6, 1, 15, 10, 1, 4, 5, 5, 15, 13, 2, 13, 2, 11, 4, 12, 13, 13, 4, 5, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - }, - }, - } - - for _, test := range tests { - arguments := test.startingArguments - diff, err := builder.BuildStateDiff(arguments.oldStateRoot, arguments.newStateRoot, arguments.blockNumber, arguments.blockHash) - if err != nil { - t.Error(err) - } - receivedStateDiffRlp, err := rlp.EncodeToBytes(diff) - if err != nil { - t.Error(err) - } - expectedStateDiffRlp, err := rlp.EncodeToBytes(test.expected) - if err != nil { - t.Error(err) - } - sort.Slice(receivedStateDiffRlp, func(i, j int) bool { return receivedStateDiffRlp[i] < 
receivedStateDiffRlp[j] }) - sort.Slice(expectedStateDiffRlp, func(i, j int) bool { return expectedStateDiffRlp[i] < expectedStateDiffRlp[j] }) - if !bytes.Equal(receivedStateDiffRlp, expectedStateDiffRlp) { - t.Logf("Test failed: %s", test.name) - t.Errorf("actual state diff rlp: %+v\nexpected state diff rlp: %+v", receivedStateDiffRlp, expectedStateDiffRlp) - } - } -} - -func TestBuilderWithWatchedAddressList(t *testing.T) { - _, blockMap, chain := testhelpers.MakeChain(3, testhelpers.Genesis) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) - defer chain.Stop() - block0 = blockMap[block0Hash] - block1 = blockMap[block1Hash] - block2 = blockMap[block2Hash] - block3 = blockMap[block3Hash] - config := statediff.Config{ - PathsAndProofs: true, - AllNodes: false, - WatchedAddresses: []string{testhelpers.Account1Addr.Hex(), testhelpers.ContractAddr.Hex()}, - } - builder = statediff.NewBuilder(testhelpers.Testdb, chain, config) - - var tests = []struct { - name string - startingArguments arguments - expected *statediff.StateDiff - }{ - { - "testEmptyDiff", - arguments{ - oldStateRoot: block0.Root(), - newStateRoot: block0.Root(), - blockNumber: block0.Number(), - blockHash: block0Hash, - }, - &statediff.StateDiff{ - BlockNumber: block0.Number(), - BlockHash: block0Hash, - CreatedAccounts: emptyAccountDiffEventualMap, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: emptyAccountDiffIncrementalMap, - }, - }, - { - "testBlock1", - //10000 transferred from testBankAddress to account1Addr - arguments{ - oldStateRoot: block0.Root(), - newStateRoot: block1.Root(), - blockNumber: block1.Number(), - blockHash: block1Hash, - }, - &statediff.StateDiff{ - BlockNumber: block1.Number(), - BlockHash: block1.Hash(), - CreatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: testhelpers.Account1LeafKey.Bytes(), - Value: account1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 107, 160, 57, 38, 219, 105, 170, 206, 213, 24, 233, 185, 240, 244, 52, 164, 115, 231, 23, 65, 9, 201, 67, 84, 139, 184, 242, 59, 228, 28, 167, 109, 154, 210, 184, 72, 248, 70, 128, 130, 39, 16, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{14, 9, 2, 6, 13, 11, 6, 9, 10, 10, 12, 14, 13, 5, 1, 8, 14, 9, 11, 9, 15, 0, 15, 4, 3, 4, 10, 4, 7, 3, 14, 7, 1, 7, 4, 1, 0, 9, 12, 9, 4, 3, 5, 4, 8, 11, 11, 8, 15, 2, 3, 11, 14, 4, 1, 12, 10, 7, 6, 13, 9, 10, 13, 2, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{}, - }, - }, - { - "testBlock2", - //1000 transferred from testBankAddress to account1Addr - //1000 transferred from account1Addr to account2Addr - arguments{ - oldStateRoot: block1.Root(), - newStateRoot: 
block2.Root(), - blockNumber: block2.Number(), - blockHash: block2Hash, - }, - &statediff.StateDiff{ - BlockNumber: block2.Number(), - BlockHash: block2.Hash(), - CreatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: contractLeafKey.Bytes(), - Value: contractAccount, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 105, 160, 49, 20, 101, 138, 116, 217, 204, 159, 122, 207, 44, 92, 214, 150, 195, 73, 77, 124, 52, 77, 120, 191, 236, 58, 221, 13, 145, 236, 78, 141, 28, 69, 184, 70, 248, 68, 1, 128, 160, 130, 30, 37, 86, 162, 144, 200, 100, 5, 248, 22, 10, 45, 102, 32, 66, 164, 49, 186, 69, 107, 157, 178, 101, 199, 155, 184, 55, 192, 75, 229, 240, 160, 117, 63, 152, 168, 212, 50, 139, 21, 99, 110, 70, 246, 111, 44, 180, 188, 134, 1, 0, 170, 23, 150, 124, 193, 69, 252, 209, 125, 29, 71, 16, 234}}, - Path: []byte{6, 1, 1, 4, 6, 5, 8, 10, 7, 4, 13, 9, 12, 12, 9, 15, 7, 10, 12, 15, 2, 12, 5, 12, 13, 6, 9, 6, 12, 3, 4, 9, 4, 13, 7, 12, 3, 4, 4, 13, 7, 8, 11, 15, 14, 12, 3, 10, 13, 13, 0, 13, 9, 1, 14, 12, 4, 14, 8, 13, 1, 12, 4, 5, 16}, - Storage: []statediff.StorageDiff{ - { - Leaf: true, - Key: originalStorageKey, - Value: originalStorageValue, - Proof: [][]byte{{227, 161, 32, 41, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}}, - Path: []byte{2, 9, 0, 13, 14, 12, 13, 9, 5, 4, 8, 11, 6, 2, 10, 8, 13, 6, 0, 3, 4, 5, 10, 9, 8, 8, 3, 8, 6, 15, 12, 8, 4, 11, 10, 6, 11, 12, 9, 5, 4, 8, 4, 0, 0, 8, 15, 6, 3, 6, 2, 15, 9, 3, 1, 6, 0, 14, 15, 3, 14, 5, 6, 3, 16}, - }, - }, - }, - }, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: testhelpers.Account1LeafKey.Bytes(), - Value: account3, - Proof: [][]byte{{248, 177, 160, 177, 155, 238, 178, 242, 47, 83, 2, 49, 141, 155, 92, 149, 175, 245, 120, 233, 177, 101, 67, 46, 200, 23, 250, 41, 74, 135, 94, 61, 133, 51, 162, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 114, 57, 32, 11, 115, 232, 140, 238, 165, 222, 121, 226, 208, 2, 192, 216, 67, 198, 179, 31, 181, 27, 208, 243, 99, 202, 48, 148, 207, 107, 106, 177, 128, 128, 128, 128, 128, 160, 10, 173, 165, 125, 110, 240, 77, 112, 149, 100, 135, 237, 25, 228, 116, 7, 195, 9, 210, 166, 208, 148, 101, 23, 244, 238, 84, 84, 211, 249, 138, 137, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 107, 160, 57, 38, 219, 105, 170, 206, 213, 24, 233, 185, 240, 244, 52, 164, 115, 231, 23, 65, 9, 201, 67, 84, 
139, 184, 242, 59, 228, 28, 167, 109, 154, 210, 184, 72, 248, 70, 2, 130, 39, 16, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{14, 9, 2, 6, 13, 11, 6, 9, 10, 10, 12, 14, 13, 5, 1, 8, 14, 9, 11, 9, 15, 0, 15, 4, 3, 4, 10, 4, 7, 3, 14, 7, 1, 7, 4, 1, 0, 9, 12, 9, 4, 3, 5, 4, 8, 11, 11, 8, 15, 2, 3, 11, 14, 4, 1, 12, 10, 7, 6, 13, 9, 10, 13, 2, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - }, - }, - { - "testBlock3", - //the contract's storage is changed - //and the block is mined by account 2 - arguments{ - oldStateRoot: block2.Root(), - newStateRoot: block3.Root(), - blockNumber: block3.Number(), - blockHash: block3.Hash(), - }, - &statediff.StateDiff{ - BlockNumber: block3.Number(), - BlockHash: block3.Hash(), - CreatedAccounts: []statediff.AccountDiff{}, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: contractLeafKey.Bytes(), - Value: contractAccount2, - Proof: [][]byte{{248, 177, 160, 101, 223, 138, 81, 34, 40, 229, 170, 198, 188, 136, 99, 7, 55, 33, 112, 160, 111, 181, 131, 167, 201, 131, 24, 201, 211, 177, 30, 159, 229, 246, 6, 128, 128, 128, 128, 160, 179, 86, 53, 29, 96, 188, 152, 148, 207, 31, 29, 108, 182, 140, 129, 95, 1, 49, 213, 15, 29, 168, 60, 64, 35, 160, 158, 200, 85, 207, 255, 145, 160, 32, 135, 108, 213, 150, 150, 110, 44, 170, 65, 75, 154, 74, 249, 94, 65, 74, 107, 100, 115, 39, 5, 3, 26, 22, 238, 138, 114, 254, 21, 6, 171, 128, 128, 128, 128, 128, 160, 4, 228, 121, 222, 255, 218, 60, 247, 15, 0, 34, 198, 28, 229, 180, 129, 109, 157, 68, 181, 248, 229, 200, 123, 29, 81, 145, 114, 90, 209, 205, 210, 128, 160, 255, 115, 147, 190, 57, 135, 174, 188, 86, 51, 227, 70, 22, 253, 237, 49, 24, 19, 149, 199, 142, 195, 186, 244, 70, 51, 138, 0, 146, 148, 117, 60, 128, 128}, - {248, 105, 160, 49, 20, 101, 138, 116, 217, 204, 159, 122, 207, 44, 92, 214, 150, 195, 73, 77, 124, 52, 77, 120, 191, 236, 58, 221, 13, 145, 236, 78, 141, 28, 69, 184, 70, 248, 68, 1, 128, 160, 113, 224, 209, 75, 43, 147, 229, 199, 249, 116, 142, 105, 225, 254, 95, 23, 73, 138, 28, 58, 195, 206, 194, 159, 150, 175, 19, 215, 248, 164, 224, 112, 160, 117, 63, 152, 168, 212, 50, 139, 21, 99, 110, 70, 246, 111, 44, 180, 188, 134, 1, 0, 170, 23, 150, 124, 193, 69, 252, 209, 125, 29, 71, 16, 234}}, - Path: []byte{6, 1, 1, 4, 6, 5, 8, 10, 7, 4, 13, 9, 12, 12, 9, 15, 7, 10, 12, 15, 2, 12, 5, 12, 13, 6, 9, 6, 12, 3, 4, 9, 4, 13, 7, 12, 3, 4, 4, 13, 7, 8, 11, 15, 14, 12, 3, 10, 13, 13, 0, 13, 9, 1, 14, 12, 4, 14, 8, 13, 1, 12, 4, 5, 16}, - Storage: []statediff.StorageDiff{ - { - Leaf: true, - Key: updatedStorageKey, - Value: updatedStorageValue, - Proof: [][]byte{{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 160, 205, 69, 114, 89, 105, 97, 21, 35, 94, 100, 199, 130, 35, 52, 214, 33, 41, 226, 241, 96, 68, 37, 167, 218, 100, 148, 243, 95, 196, 91, 229, 24, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - {226, 160, 48, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 3}}, - Path: []byte{4, 0, 5, 7, 8, 7, 15, 10, 1, 2, 10, 8, 2, 3, 14, 0, 15, 2, 11, 
7, 6, 3, 1, 12, 12, 4, 1, 11, 3, 11, 10, 8, 8, 2, 8, 11, 3, 3, 2, 1, 12, 10, 8, 1, 1, 1, 1, 1, 15, 10, 7, 5, 12, 13, 3, 10, 10, 3, 11, 11, 5, 10, 12, 14, 16}, - }, - }, - }, - }, - }, - }, - } - - for _, test := range tests { - arguments := test.startingArguments - diff, err := builder.BuildStateDiff(arguments.oldStateRoot, arguments.newStateRoot, arguments.blockNumber, arguments.blockHash) - if err != nil { - t.Error(err) - } - receivedStateDiffRlp, err := rlp.EncodeToBytes(diff) - if err != nil { - t.Error(err) - } - expectedStateDiffRlp, err := rlp.EncodeToBytes(test.expected) - if err != nil { - t.Error(err) - } - sort.Slice(receivedStateDiffRlp, func(i, j int) bool { return receivedStateDiffRlp[i] < receivedStateDiffRlp[j] }) - sort.Slice(expectedStateDiffRlp, func(i, j int) bool { return expectedStateDiffRlp[i] < expectedStateDiffRlp[j] }) - if !bytes.Equal(receivedStateDiffRlp, expectedStateDiffRlp) { - t.Logf("Test failed: %s", test.name) - t.Errorf("actual state diff rlp: %+v\nexpected state diff rlp: %+v", receivedStateDiffRlp, expectedStateDiffRlp) - } - } -} - -/* -contract test { - - uint256[100] data; - - constructor() public { - data = [1]; - } - - function Put(uint256 addr, uint256 value) { - data[addr] = value; - } - - function Get(uint256 addr) constant returns (uint256 value) { - return data[addr]; - } -} -*/ diff --git a/vendor/github.com/ethereum/go-ethereum/statediff/service.go b/vendor/github.com/ethereum/go-ethereum/statediff/service.go index 5e3f3e59..374734dc 100644 --- a/vendor/github.com/ethereum/go-ethereum/statediff/service.go +++ b/vendor/github.com/ethereum/go-ethereum/statediff/service.go @@ -35,7 +35,7 @@ import ( ) type blockChain interface { - SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription + SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription GetBlockByHash(hash common.Hash) *types.Block AddToStateDiffProcessedCollection(hash common.Hash) } @@ -45,7 +45,7 @@ type IService interface { // APIs(), Protocols(), Start() and Stop() node.Service // Main event loop for processing state diffs - Loop(chainEventCh chan core.ChainEvent) + Loop(chainEventCh chan core.ChainHeadEvent) // Method to subscribe to receive state diff processing output Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool) // Method to unsubscribe from state diff processing @@ -100,8 +100,8 @@ func (sds *Service) APIs() []rpc.API { } // Loop is the main processing method -func (sds *Service) Loop(chainEventCh chan core.ChainEvent) { - chainEventSub := sds.BlockChain.SubscribeChainEvent(chainEventCh) +func (sds *Service) Loop(chainEventCh chan core.ChainHeadEvent) { + chainEventSub := sds.BlockChain.SubscribeChainHeadEvent(chainEventCh) defer chainEventSub.Unsubscribe() errCh := chainEventSub.Err() @@ -129,11 +129,11 @@ func (sds *Service) Loop(chainEventCh chan core.ChainEvent) { log.Error("Error building statediff", "block number", currentBlock.Number(), "error", err) } case err := <-errCh: - log.Warn("Error from chain event subscription, breaking loop.", "error", err) + log.Warn("Error from chain event subscription, breaking loop", "error", err) sds.close() return case <-sds.QuitChan: - log.Info("Quitting the statediff block channel") + log.Info("Quitting the statediffing process") sds.close() return } @@ -195,7 +195,7 @@ func (sds *Service) Unsubscribe(id rpc.ID) error { func (sds *Service) Start(*p2p.Server) error { log.Info("Starting statediff service") - chainEventCh := make(chan core.ChainEvent, 10) + chainEventCh := make(chan 
core.ChainHeadEvent, 10) go sds.Loop(chainEventCh) return nil @@ -214,9 +214,9 @@ func (sds *Service) send(payload Payload) { for id, sub := range sds.Subscriptions { select { case sub.PayloadChan <- payload: - log.Info("sending state diff payload to subscription %s", id) + log.Info(fmt.Sprintf("sending state diff payload to subscription %s", id)) default: - log.Info("unable to send payload to subscription %s; channel has no receiver", id) + log.Info(fmt.Sprintf("unable to send payload to subscription %s", id)) } } sds.Unlock() @@ -229,9 +229,9 @@ func (sds *Service) close() { select { case sub.QuitChan <- true: delete(sds.Subscriptions, id) - log.Info("closing subscription %s", id) + log.Info(fmt.Sprintf("closing subscription %s", id)) default: - log.Info("unable to close subscription %s; channel has no receiver", id) + log.Info(fmt.Sprintf("unable to close subscription %s; channel has no receiver", id)) } } sds.Unlock() diff --git a/vendor/github.com/ethereum/go-ethereum/statediff/service_test.go b/vendor/github.com/ethereum/go-ethereum/statediff/service_test.go deleted file mode 100644 index d5edee04..00000000 --- a/vendor/github.com/ethereum/go-ethereum/statediff/service_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
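The service.go hunks above move the statediff service from core.ChainEvent to core.ChainHeadEvent: blockChain now exposes SubscribeChainHeadEvent, Loop takes a chan core.ChainHeadEvent, and Start allocates that channel before spawning the loop. The sketch below shows how a caller would drive the updated interface; it is illustrative only, and the function name, buffer size, and subscriber wiring are assumptions rather than code from this PR.

package main

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/statediff"
)

// runStateDiffing starts the head-event loop and registers one subscriber.
// Loop subscribes itself through blockChain.SubscribeChainHeadEvent, so the
// caller only allocates the buffered channel that the subscription feeds.
func runStateDiffing(sds statediff.IService) (<-chan statediff.Payload, chan bool) {
	chainEventCh := make(chan core.ChainHeadEvent, 10)
	go sds.Loop(chainEventCh)

	// Payloads for each processed head are fanned out to every subscription id.
	payloadChan := make(chan statediff.Payload)
	quitChan := make(chan bool)
	sds.Subscribe(rpc.NewID(), payloadChan, quitChan)
	return payloadChan, quitChan
}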
- -package statediff_test - -import ( - "bytes" - "math/big" - "math/rand" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers/mocks" -) - -func TestServiceLoop(t *testing.T) { - testErrorInChainEventLoop(t) - testErrorInBlockLoop(t) -} - -var ( - eventsChannel = make(chan core.ChainEvent, 1) - - parentRoot1 = common.HexToHash("0x01") - parentRoot2 = common.HexToHash("0x02") - parentHeader1 = types.Header{Number: big.NewInt(rand.Int63()), Root: parentRoot1} - parentHeader2 = types.Header{Number: big.NewInt(rand.Int63()), Root: parentRoot2} - - parentBlock1 = types.NewBlock(&parentHeader1, nil, nil, nil) - parentBlock2 = types.NewBlock(&parentHeader2, nil, nil, nil) - - parentHash1 = parentBlock1.Hash() - parentHash2 = parentBlock2.Hash() - - testRoot1 = common.HexToHash("0x03") - testRoot2 = common.HexToHash("0x04") - testRoot3 = common.HexToHash("0x04") - header1 = types.Header{ParentHash: parentHash1, Root: testRoot1} - header2 = types.Header{ParentHash: parentHash2, Root: testRoot2} - header3 = types.Header{ParentHash: common.HexToHash("parent hash"), Root: testRoot3} - - testBlock1 = types.NewBlock(&header1, nil, nil, nil) - testBlock2 = types.NewBlock(&header2, nil, nil, nil) - testBlock3 = types.NewBlock(&header3, nil, nil, nil) - - event1 = core.ChainEvent{Block: testBlock1} - event2 = core.ChainEvent{Block: testBlock2} - event3 = core.ChainEvent{Block: testBlock3} -) - -func testErrorInChainEventLoop(t *testing.T) { - //the first chain event causes and error (in blockchain mock) - builder := mocks.Builder{} - blockChain := mocks.BlockChain{} - service := statediff.Service{ - Builder: &builder, - BlockChain: &blockChain, - QuitChan: make(chan bool), - Subscriptions: make(map[rpc.ID]statediff.Subscription), - } - testRoot2 = common.HexToHash("0xTestRoot2") - blockChain.SetParentBlocksToReturn([]*types.Block{parentBlock1, parentBlock2}) - blockChain.SetChainEvents([]core.ChainEvent{event1, event2, event3}) - service.Loop(eventsChannel) - if !reflect.DeepEqual(builder.BlockHash, testBlock2.Hash()) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", builder.BlockHash, testBlock2.Hash()) - } - if !bytes.Equal(builder.OldStateRoot.Bytes(), parentBlock2.Root().Bytes()) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", builder.OldStateRoot, parentBlock2.Root()) - } - if !bytes.Equal(builder.NewStateRoot.Bytes(), testBlock2.Root().Bytes()) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", builder.NewStateRoot, testBlock2.Root()) - } - //look up the parent block from its hash - expectedHashes := []common.Hash{testBlock1.ParentHash(), testBlock2.ParentHash()} - if !reflect.DeepEqual(blockChain.ParentHashesLookedUp, expectedHashes) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", blockChain.ParentHashesLookedUp, expectedHashes) - } -} - -func testErrorInBlockLoop(t *testing.T) { - //second block's parent block can't be found - builder := mocks.Builder{} - blockChain := mocks.BlockChain{} - service := statediff.Service{ - Builder: &builder, - BlockChain: &blockChain, - QuitChan: make(chan bool), - Subscriptions: 
make(map[rpc.ID]statediff.Subscription), - } - - blockChain.SetParentBlocksToReturn([]*types.Block{parentBlock1, nil}) - blockChain.SetChainEvents([]core.ChainEvent{event1, event2}) - service.Loop(eventsChannel) - - if !bytes.Equal(builder.BlockHash.Bytes(), testBlock1.Hash().Bytes()) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", builder.BlockHash, testBlock1.Hash()) - } - if !bytes.Equal(builder.OldStateRoot.Bytes(), parentBlock1.Root().Bytes()) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", builder.OldStateRoot, parentBlock1.Root()) - } - if !bytes.Equal(builder.NewStateRoot.Bytes(), testBlock1.Root().Bytes()) { - t.Error("Test failure:", t.Name()) - t.Logf("Actual does not equal expected.\nactual:%+v\nexpected: %+v", builder.NewStateRoot, testBlock1.Root()) - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/statediff/testhelpers/mocks/service_test.go b/vendor/github.com/ethereum/go-ethereum/statediff/testhelpers/mocks/service_test.go deleted file mode 100644 index 4b4ac95a..00000000 --- a/vendor/github.com/ethereum/go-ethereum/statediff/testhelpers/mocks/service_test.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
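The two test files removed above drove Loop and the mock API with core.ChainEvent fixtures. Under the narrowed blockChain interface in service.go, a comparable test double only needs the three methods shown there; the sketch below illustrates one such double, with the event.Feed fan-out and the struct fields being assumptions for illustration rather than anything this PR adds.

package mocks

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

// mockChain satisfies the updated blockChain interface from service.go.
type mockChain struct {
	feed      event.Feed
	blocks    map[common.Hash]*types.Block
	processed []common.Hash
}

// SubscribeChainHeadEvent mirrors core.BlockChain by delegating to an event.Feed.
func (m *mockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
	return m.feed.Subscribe(ch)
}

// GetBlockByHash returns a canned block, or nil to exercise the error path in Loop.
func (m *mockChain) GetBlockByHash(hash common.Hash) *types.Block {
	return m.blocks[hash]
}

// AddToStateDiffProcessedCollection records which hashes the service marked as processed.
func (m *mockChain) AddToStateDiffProcessedCollection(hash common.Hash) {
	m.processed = append(m.processed, hash)
}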
- -package mocks - -import ( - "bytes" - "math/big" - "sort" - "sync" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers" -) - -var block0, block1 *types.Block -var burnLeafKey = testhelpers.AddressToLeafKey(common.HexToAddress("0x0")) -var emptyAccountDiffEventualMap = make([]statediff.AccountDiff, 0) -var account1, _ = rlp.EncodeToBytes(state.Account{ - Nonce: uint64(0), - Balance: big.NewInt(10000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), -}) -var burnAccount1, _ = rlp.EncodeToBytes(state.Account{ - Nonce: uint64(0), - Balance: big.NewInt(2000000000000000000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), -}) -var bankAccount1, _ = rlp.EncodeToBytes(state.Account{ - Nonce: uint64(1), - Balance: big.NewInt(testhelpers.TestBankFunds.Int64() - 10000), - CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), - Root: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), -}) - -func TestAPI(t *testing.T) { - _, blockMap, chain := testhelpers.MakeChain(3, testhelpers.Genesis) - defer chain.Stop() - block0Hash := common.HexToHash("0xd1721cfd0b29c36fd7a68f25c128e86413fb666a6e1d68e89b875bd299262661") - block1Hash := common.HexToHash("0xbbe88de60ba33a3f18c0caa37d827bfb70252e19e40a07cd34041696c35ecb1a") - block0 = blockMap[block0Hash] - block1 = blockMap[block1Hash] - blockChan := make(chan *types.Block) - parentBlockChain := make(chan *types.Block) - serviceQuitChan := make(chan bool) - config := statediff.Config{ - PathsAndProofs: true, - AllNodes: false, - } - mockService := MockStateDiffService{ - Mutex: sync.Mutex{}, - Builder: statediff.NewBuilder(testhelpers.Testdb, chain, config), - BlockChan: blockChan, - ParentBlockChan: parentBlockChain, - QuitChan: serviceQuitChan, - Subscriptions: make(map[rpc.ID]statediff.Subscription), - streamBlock: true, - } - mockService.Start(nil) - id := rpc.NewID() - payloadChan := make(chan statediff.Payload) - quitChan := make(chan bool) - mockService.Subscribe(id, payloadChan, quitChan) - blockChan <- block1 - parentBlockChain <- block0 - expectedBlockRlp, _ := rlp.EncodeToBytes(block1) - expectedStateDiff := statediff.StateDiff{ - BlockNumber: block1.Number(), - BlockHash: block1.Hash(), - CreatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: burnLeafKey.Bytes(), - Value: burnAccount1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 113, 160, 51, 128, 199, 
183, 174, 129, 165, 142, 185, 141, 156, 120, 222, 74, 31, 215, 253, 149, 53, 252, 149, 62, 210, 190, 96, 45, 170, 164, 23, 103, 49, 42, 184, 78, 248, 76, 128, 136, 27, 193, 109, 103, 78, 200, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{5, 3, 8, 0, 12, 7, 11, 7, 10, 14, 8, 1, 10, 5, 8, 14, 11, 9, 8, 13, 9, 12, 7, 8, 13, 14, 4, 10, 1, 15, 13, 7, 15, 13, 9, 5, 3, 5, 15, 12, 9, 5, 3, 14, 13, 2, 11, 14, 6, 0, 2, 13, 10, 10, 10, 4, 1, 7, 6, 7, 3, 1, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - { - Leaf: true, - Key: testhelpers.Account1LeafKey.Bytes(), - Value: account1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 107, 160, 57, 38, 219, 105, 170, 206, 213, 24, 233, 185, 240, 244, 52, 164, 115, 231, 23, 65, 9, 201, 67, 84, 139, 184, 242, 59, 228, 28, 167, 109, 154, 210, 184, 72, 248, 70, 128, 130, 39, 16, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{14, 9, 2, 6, 13, 11, 6, 9, 10, 10, 12, 14, 13, 5, 1, 8, 14, 9, 11, 9, 15, 0, 15, 4, 3, 4, 10, 4, 7, 3, 14, 7, 1, 7, 4, 1, 0, 9, 12, 9, 4, 3, 5, 4, 8, 11, 11, 8, 15, 2, 3, 11, 14, 4, 1, 12, 10, 7, 6, 13, 9, 10, 13, 2, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - DeletedAccounts: emptyAccountDiffEventualMap, - UpdatedAccounts: []statediff.AccountDiff{ - { - Leaf: true, - Key: testhelpers.BankLeafKey.Bytes(), - Value: bankAccount1, - Proof: [][]byte{{248, 113, 160, 87, 118, 82, 182, 37, 183, 123, 219, 91, 247, 123, 196, 63, 49, 37, 202, 215, 70, 77, 103, 157, 21, 117, 86, 82, 119, 211, 97, 27, 128, 83, 231, 128, 128, 128, 128, 160, 254, 136, 159, 16, 229, 219, 143, 44, 43, 243, 85, 146, 129, 82, 161, 127, 110, 59, 185, 154, 146, 65, 172, 109, 132, 199, 126, 98, 100, 80, 156, 121, 128, 128, 128, 128, 128, 128, 128, 128, 160, 17, 219, 12, 218, 52, 168, 150, 218, 190, 182, 131, 155, 176, 106, 56, 244, 149, 20, 207, 164, 134, 67, 89, 132, 235, 1, 59, 125, 249, 238, 133, 197, 128, 128}, - {248, 109, 160, 48, 191, 73, 244, 64, 161, 205, 5, 39, 228, 208, 110, 39, 101, 101, 76, 15, 86, 69, 34, 87, 81, 109, 121, 58, 155, 141, 96, 77, 207, 223, 42, 184, 74, 248, 72, 1, 132, 5, 245, 185, 240, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}}, - Path: []byte{0, 0, 11, 15, 4, 9, 15, 4, 4, 0, 10, 1, 12, 13, 0, 5, 2, 7, 14, 4, 13, 0, 6, 14, 2, 7, 
6, 5, 6, 5, 4, 12, 0, 15, 5, 6, 4, 5, 2, 2, 5, 7, 5, 1, 6, 13, 7, 9, 3, 10, 9, 11, 8, 13, 6, 0, 4, 13, 12, 15, 13, 15, 2, 10, 16}, - Storage: []statediff.StorageDiff{}, - }, - }, - } - expectedStateDiffBytes, err := rlp.EncodeToBytes(expectedStateDiff) - if err != nil { - t.Error(err) - } - sort.Slice(expectedStateDiffBytes, func(i, j int) bool { return expectedStateDiffBytes[i] < expectedStateDiffBytes[j] }) - - select { - case payload := <-payloadChan: - if !bytes.Equal(payload.BlockRlp, expectedBlockRlp) { - t.Errorf("payload does not have expected block\r\actual block rlp: %v\r\nexpected block rlp: %v", payload.BlockRlp, expectedBlockRlp) - } - sort.Slice(payload.StateDiffRlp, func(i, j int) bool { return payload.StateDiffRlp[i] < payload.StateDiffRlp[j] }) - if !bytes.Equal(payload.StateDiffRlp, expectedStateDiffBytes) { - t.Errorf("payload does not have expected state diff\r\actual state diff rlp: %v\r\nexpected state diff rlp: %v", payload.StateDiffRlp, expectedStateDiffBytes) - } - if payload.Err != nil { - t.Errorf("payload should not contain an error, but does: %v", payload.Err) - } - case <-quitChan: - t.Errorf("channel quit before delivering payload") - } -} diff --git a/vendor/github.com/ipfs/go-ds-badger/.travis.yml b/vendor/github.com/ipfs/go-ds-badger/.travis.yml new file mode 100644 index 00000000..4cfe98c2 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ds-badger/LICENSE b/vendor/github.com/ipfs/go-ds-badger/LICENSE new file mode 100644 index 00000000..1e2cfe14 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2016 Łukasz Magiera + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/ipfs/go-ds-badger/Makefile b/vendor/github.com/ipfs/go-ds-badger/Makefile new file mode 100644 index 00000000..785abac8 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/Makefile @@ -0,0 +1,14 @@ + +test: deps + go test -race -v ./... 
+ +export IPFS_API ?= v04x.ipfs.io + +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite + go get -t ./... diff --git a/vendor/github.com/ipfs/go-ds-badger/README.md b/vendor/github.com/ipfs/go-ds-badger/README.md new file mode 100644 index 00000000..b3e55c63 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/README.md @@ -0,0 +1,28 @@ +# go-ds-badger + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ds-badger?status.svg)](https://godoc.org/github.com/ipfs/go-ds-badger) +[![Build Status](https://travis-ci.org/ipfs/go-ds-badger.svg?branch=master)](https://travis-ci.org/ipfs/go-ds-badger) + +> Datastore implementation using [badger](https://github.com/dgraph-io/badger) as backend. + +## Documentation + +https://godoc.org/github.com/ipfs/go-ds-badger + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-ds-badger/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + +## License + +MIT diff --git a/vendor/github.com/ipfs/go-ds-badger/codecov.yml b/vendor/github.com/ipfs/go-ds-badger/codecov.yml new file mode 100644 index 00000000..5f88a9ea --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-ds-badger/datastore.go b/vendor/github.com/ipfs/go-ds-badger/datastore.go new file mode 100644 index 00000000..8204e04e --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/datastore.go @@ -0,0 +1,608 @@ +package badger + +import ( + "errors" + "fmt" + "strings" + "sync" + "time" + + badger "github.com/dgraph-io/badger" + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + logger "github.com/ipfs/go-log" + goprocess "github.com/jbenet/goprocess" +) + +var log = logger.Logger("badger") + +var ErrClosed = errors.New("datastore closed") + +type Datastore struct { + DB *badger.DB + + closeLk sync.RWMutex + closed bool + closeOnce sync.Once + closing chan struct{} + + gcDiscardRatio float64 +} + +// Implements the datastore.Txn interface, enabling transaction support for +// the badger Datastore. +type txn struct { + ds *Datastore + txn *badger.Txn + + // Whether this transaction has been implicitly created as a result of a direct Datastore + // method invocation. + implicit bool +} + +// Options are the badger datastore options, reexported here for convenience. +type Options struct { + gcDiscardRatio float64 + + badger.Options +} + +// DefaultOptions are the default options for the badger datastore. 
+var DefaultOptions Options + +func init() { + DefaultOptions = Options{ + gcDiscardRatio: 0.1, + Options: badger.DefaultOptions, + } + DefaultOptions.Options.CompactL0OnClose = false + DefaultOptions.Options.Truncate = true +} + +var _ ds.Datastore = (*Datastore)(nil) +var _ ds.TxnDatastore = (*Datastore)(nil) +var _ ds.TTLDatastore = (*Datastore)(nil) + +// NewDatastore creates a new badger datastore. +// +// DO NOT set the Dir and/or ValuePath fields of opt, they will be set for you. +func NewDatastore(path string, options *Options) (*Datastore, error) { + // Copy the options because we modify them. + var opt badger.Options + var gcDiscardRatio float64 + if options == nil { + opt = badger.DefaultOptions + gcDiscardRatio = DefaultOptions.gcDiscardRatio + } else { + opt = options.Options + gcDiscardRatio = options.gcDiscardRatio + } + + opt.Dir = path + opt.ValueDir = path + opt.Logger = log + + kv, err := badger.Open(opt) + if err != nil { + if strings.HasPrefix(err.Error(), "manifest has unsupported version:") { + err = fmt.Errorf("unsupported badger version, use github.com/ipfs/badgerds-upgrade to upgrade: %s", err.Error()) + } + return nil, err + } + + return &Datastore{ + DB: kv, + closing: make(chan struct{}), + gcDiscardRatio: gcDiscardRatio, + }, nil +} + +// NewTransaction starts a new transaction. The resulting transaction object +// can be mutated without incurring changes to the underlying Datastore until +// the transaction is Committed. +func (d *Datastore) NewTransaction(readOnly bool) (ds.Txn, error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return nil, ErrClosed + } + + return &txn{d, d.DB.NewTransaction(!readOnly), false}, nil +} + +// newImplicitTransaction creates a transaction marked as 'implicit'. +// Implicit transactions are created by Datastore methods performing single operations. 
+func (d *Datastore) newImplicitTransaction(readOnly bool) *txn { + return &txn{d, d.DB.NewTransaction(!readOnly), true} +} + +func (d *Datastore) Put(key ds.Key, value []byte) error { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return ErrClosed + } + + txn := d.newImplicitTransaction(false) + defer txn.discard() + + if err := txn.put(key, value); err != nil { + return err + } + + return txn.commit() +} + +func (d *Datastore) PutWithTTL(key ds.Key, value []byte, ttl time.Duration) error { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return ErrClosed + } + + txn := d.newImplicitTransaction(false) + defer txn.discard() + + if err := txn.putWithTTL(key, value, ttl); err != nil { + return err + } + + return txn.commit() +} + +func (d *Datastore) SetTTL(key ds.Key, ttl time.Duration) error { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return ErrClosed + } + + txn := d.newImplicitTransaction(false) + defer txn.discard() + + if err := txn.setTTL(key, ttl); err != nil { + return err + } + + return txn.commit() +} + +func (d *Datastore) GetExpiration(key ds.Key) (time.Time, error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return time.Time{}, ErrClosed + } + + txn := d.newImplicitTransaction(false) + defer txn.discard() + + return txn.getExpiration(key) +} + +func (d *Datastore) Get(key ds.Key) (value []byte, err error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return nil, ErrClosed + } + + txn := d.newImplicitTransaction(true) + defer txn.discard() + + return txn.get(key) +} + +func (d *Datastore) Has(key ds.Key) (bool, error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return false, ErrClosed + } + + txn := d.newImplicitTransaction(true) + defer txn.discard() + + return txn.has(key) +} + +func (d *Datastore) GetSize(key ds.Key) (size int, err error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return -1, ErrClosed + } + + txn := d.newImplicitTransaction(true) + defer txn.discard() + + return txn.getSize(key) +} + +func (d *Datastore) Delete(key ds.Key) error { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + + txn := d.newImplicitTransaction(false) + defer txn.discard() + + err := txn.delete(key) + if err != nil { + return err + } + + return txn.commit() +} + +func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + + txn := d.newImplicitTransaction(true) + // We cannot defer txn.Discard() here, as the txn must remain active while the iterator is open. + // https://github.com/dgraph-io/badger/commit/b1ad1e93e483bbfef123793ceedc9a7e34b09f79 + // The closing logic in the query goprocess takes care of discarding the implicit transaction. + return txn.query(q) +} + +// DiskUsage implements the PersistentDatastore interface. +// It returns the sum of lsm and value log files sizes in bytes. 
+func (d *Datastore) DiskUsage() (uint64, error) { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return 0, ErrClosed + } + lsm, vlog := d.DB.Size() + return uint64(lsm + vlog), nil +} + +func (d *Datastore) Close() error { + d.closeOnce.Do(func() { + close(d.closing) + }) + d.closeLk.Lock() + defer d.closeLk.Unlock() + if d.closed { + return ErrClosed + } + d.closed = true + return d.DB.Close() +} + +func (d *Datastore) Batch() (ds.Batch, error) { + tx, _ := d.NewTransaction(false) + return tx, nil +} + +func (d *Datastore) CollectGarbage() error { + d.closeLk.RLock() + defer d.closeLk.RUnlock() + if d.closed { + return ErrClosed + } + + err := d.DB.RunValueLogGC(d.gcDiscardRatio) + if err == badger.ErrNoRewrite { + err = nil + } + return err +} + +var _ ds.Datastore = (*txn)(nil) +var _ ds.TTLDatastore = (*txn)(nil) + +func (t *txn) Put(key ds.Key, value []byte) error { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return ErrClosed + } + return t.put(key, value) +} + +func (t *txn) put(key ds.Key, value []byte) error { + return t.txn.Set(key.Bytes(), value) +} + +func (t *txn) PutWithTTL(key ds.Key, value []byte, ttl time.Duration) error { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return ErrClosed + } + return t.putWithTTL(key, value, ttl) +} + +func (t *txn) putWithTTL(key ds.Key, value []byte, ttl time.Duration) error { + return t.txn.SetWithTTL(key.Bytes(), value, ttl) +} + +func (t *txn) GetExpiration(key ds.Key) (time.Time, error) { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return time.Time{}, ErrClosed + } + + return t.getExpiration(key) +} + +func (t *txn) getExpiration(key ds.Key) (time.Time, error) { + item, err := t.txn.Get(key.Bytes()) + if err == badger.ErrKeyNotFound { + return time.Time{}, ds.ErrNotFound + } else if err != nil { + return time.Time{}, err + } + return time.Unix(int64(item.ExpiresAt()), 0), nil +} + +func (t *txn) SetTTL(key ds.Key, ttl time.Duration) error { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return ErrClosed + } + + return t.setTTL(key, ttl) +} + +func (t *txn) setTTL(key ds.Key, ttl time.Duration) error { + item, err := t.txn.Get(key.Bytes()) + if err != nil { + return err + } + return item.Value(func(data []byte) error { + return t.putWithTTL(key, data, ttl) + }) + +} + +func (t *txn) Get(key ds.Key) ([]byte, error) { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return nil, ErrClosed + } + + return t.get(key) +} + +func (t *txn) get(key ds.Key) ([]byte, error) { + item, err := t.txn.Get(key.Bytes()) + if err == badger.ErrKeyNotFound { + err = ds.ErrNotFound + } + if err != nil { + return nil, err + } + + return item.ValueCopy(nil) +} + +func (t *txn) Has(key ds.Key) (bool, error) { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return false, ErrClosed + } + + return t.has(key) +} + +func (t *txn) has(key ds.Key) (bool, error) { + _, err := t.txn.Get(key.Bytes()) + switch err { + case badger.ErrKeyNotFound: + return false, nil + case nil: + return true, nil + default: + return false, err + } +} + +func (t *txn) GetSize(key ds.Key) (int, error) { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return -1, ErrClosed + } + + return t.getSize(key) +} + +func (t *txn) getSize(key ds.Key) (int, error) { + item, err := t.txn.Get(key.Bytes()) + switch err { + case nil: + return int(item.ValueSize()), nil + case 
badger.ErrKeyNotFound: + return -1, ds.ErrNotFound + default: + return -1, err + } +} + +func (t *txn) Delete(key ds.Key) error { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return ErrClosed + } + + return t.delete(key) +} + +func (t *txn) delete(key ds.Key) error { + return t.txn.Delete(key.Bytes()) +} + +func (t *txn) Query(q dsq.Query) (dsq.Results, error) { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return nil, ErrClosed + } + + return t.query(q) +} + +func (t *txn) query(q dsq.Query) (dsq.Results, error) { + prefix := []byte(q.Prefix) + opt := badger.DefaultIteratorOptions + opt.PrefetchValues = !q.KeysOnly + + // Special case order by key. + orders := q.Orders + if len(orders) > 0 { + switch q.Orders[0].(type) { + case dsq.OrderByKey, *dsq.OrderByKey: + // Already ordered by key. + orders = nil + case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending: + orders = nil + opt.Reverse = true + } + } + + txn := t.txn + + it := txn.NewIterator(opt) + it.Seek(prefix) + + if q.Offset > 0 { + for j := 0; j < q.Offset; j++ { + it.Next() + } + } + + qrb := dsq.NewResultBuilder(q) + + qrb.Process.Go(func(worker goprocess.Process) { + t.ds.closeLk.RLock() + closedEarly := false + defer func() { + t.ds.closeLk.RUnlock() + if closedEarly { + select { + case qrb.Output <- dsq.Result{ + Error: ErrClosed, + }: + case <-qrb.Process.Closing(): + } + } + + }() + if t.ds.closed { + closedEarly = true + return + } + + // this iterator is part of an implicit transaction, so when + // we're done we must discard the transaction. It's safe to + // discard the txn it because it contains the iterator only. + if t.implicit { + defer t.discard() + } + + defer it.Close() + + for sent := 0; it.ValidForPrefix(prefix); sent++ { + if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit { + break + } + + item := it.Item() + + k := string(item.Key()) + e := dsq.Entry{Key: k} + + var result dsq.Result + if !q.KeysOnly { + b, err := item.ValueCopy(nil) + if err != nil { + result = dsq.Result{Error: err} + } else { + e.Value = b + result = dsq.Result{Entry: e} + } + } else { + result = dsq.Result{Entry: e} + } + + if q.ReturnExpirations { + result.Expiration = time.Unix(int64(item.ExpiresAt()), 0) + } + + select { + case qrb.Output <- result: + case <-t.ds.closing: // datastore closing. + closedEarly = true + return + case <-worker.Closing(): // client told us to close early + return + } + + it.Next() + } + + return + }) + + go qrb.Process.CloseAfterChildren() + + // Now, apply remaining things (filters, order) + qr := qrb.Results() + for _, f := range q.Filters { + qr = dsq.NaiveFilter(qr, f) + } + if len(orders) > 0 { + qr = dsq.NaiveOrder(qr, orders...) 
+ } + + return qr, nil +} + +func (t *txn) Commit() error { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return ErrClosed + } + + return t.commit() +} + +func (t *txn) commit() error { + return t.txn.Commit() +} + +// Alias to commit +func (t *txn) Close() error { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return ErrClosed + } + return t.close() +} + +func (t *txn) close() error { + return t.txn.Commit() +} + +func (t *txn) Discard() { + t.ds.closeLk.RLock() + defer t.ds.closeLk.RUnlock() + if t.ds.closed { + return + } + + t.discard() +} + +func (t *txn) discard() { + t.txn.Discard() +} diff --git a/vendor/github.com/ipfs/go-ds-badger/go.mod b/vendor/github.com/ipfs/go-ds-badger/go.mod new file mode 100644 index 00000000..b5ce2565 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/go.mod @@ -0,0 +1,13 @@ +module github.com/ipfs/go-ds-badger + +require ( + github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 // indirect + github.com/dgraph-io/badger v2.0.0-rc.2+incompatible + github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/golang/protobuf v1.3.0 // indirect + github.com/ipfs/go-datastore v0.0.1 + github.com/ipfs/go-log v0.0.1 + github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 + github.com/pkg/errors v0.8.1 // indirect +) diff --git a/vendor/github.com/ipfs/go-ds-badger/go.sum b/vendor/github.com/ipfs/go-ds-badger/go.sum new file mode 100644 index 00000000..75c9054b --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/go.sum @@ -0,0 +1,62 @@ +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 h1:PqzgE6kAMi81xWQA2QIVxjWkFHptGgC547vchpUbtFo= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.5.4 h1:gVTrpUTbbr/T24uvoCaqY2KSHfNLVGm0w+hbee2HMeg= +github.com/dgraph-io/badger v1.5.4/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f h1:6itBiEUtu+gOzXZWn46bM5/qm8LlV6/byR7Yflx/y6M= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v2.0.0-rc.2+incompatible h1:7KPp6xv5+wymkVUbkAnZZXvmDrJlf09m/7u1HG5lAYA= +github.com/dgraph-io/badger v2.0.0-rc.2+incompatible/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/protobuf v1.3.0 
h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-ds-badger/package.json b/vendor/github.com/ipfs/go-ds-badger/package.json new file mode 100644 index 00000000..cca00dee --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-badger/package.json @@ -0,0 +1,41 @@ +{ + "author": "magik6k", + "bugs": { + "url": "https://github.com/ipfs/go-ds-badger" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-ds-badger" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP", + "name": "goprocess", + "version": "1.0.0" + }, + { + "author": "jbenet", + "hash": "QmUadX5EcvrBmxAV9sE7wUWtWSqxns5K84qKJBixmcT1w9", + "name": "go-datastore", + "version": "3.6.1" + }, + { + "author": "dgraph-io", + "hash": "QmU4emVTYFKnoJ5yK3pPEN9joyEx6U7y892PDx26ZtNxQd", + "name": "badger", + "version": "2.11.4" + }, + { + "hash": "QmbkT7eMTyXfpeyB3ZMxxcxg7XH8t6uXp49jqzz4HB7BGF", + "name": "go-log", + "version": "1.5.9" + } + ], + "gxVersion": "0.8.0", + "language": "go", + "license": "", + "name": "go-ds-badger", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.12.4" +} + diff --git a/vendor/github.com/ipfs/go-ds-flatfs/.gitignore b/vendor/github.com/ipfs/go-ds-flatfs/.gitignore new file mode 100644 index 00000000..1377554e --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/ipfs/go-ds-flatfs/.travis.yml b/vendor/github.com/ipfs/go-ds-flatfs/.travis.yml new file mode 100644 index 00000000..4cfe98c2 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ds-flatfs/LICENSE b/vendor/github.com/ipfs/go-ds-flatfs/LICENSE new file mode 100644 index 00000000..f2049023 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2016 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ds-flatfs/Makefile b/vendor/github.com/ipfs/go-ds-flatfs/Makefile new file mode 100644 index 00000000..54152565 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/Makefile @@ -0,0 +1,9 @@ +export IPFS_API ?= v04x.ipfs.io + +gx: + go get -u github.com/whyrusleeping/gx + go get -u github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite diff --git a/vendor/github.com/ipfs/go-ds-flatfs/README.md b/vendor/github.com/ipfs/go-ds-flatfs/README.md new file mode 100644 index 00000000..9f51bd58 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/README.md @@ -0,0 +1,92 @@ +# go-ds-flatfs + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ds-flatfs?status.svg)](https://godoc.org/github.com/ipfs/go-ds-flatfs) +[![Build Status](https://travis-ci.org/ipfs/go-ds-flatfs.svg?branch=master)](https://travis-ci.org/ipfs/go-ds-flatfs) +[![Coverage Status](https://img.shields.io/codecov/c/github/ipfs/go-ds-flatfs.svg)](https://codecov.io/gh/ipfs/go-ds-flatfs) + + +> A datastore implementation using sharded directories and flat files to store data + +`go-ds-flatfs` is used by `go-ipfs` to store raw block contents on disk. It supports several sharding functions (prefix, suffix, next-to-last/*). + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Contribute](#contribute) +- [License](#license) + +## Install + +`go-ds-flatfs` can be used like any Go module: + + +``` +import "github.com/ipfs/go-ds-flatfs" +``` + +`go-ds-flatfs` uses [`Gx`](https://github.com/whyrusleeping/gx) and [`Gx-go`](https://github.com/whyrusleeping/gx-go) to handle dependendencies. Run `make deps` to download and rewrite the imports to their fixed dependencies. + +## Usage + +Check the [GoDoc module documentation](https://godoc.org/github.com/ipfs/go-ds-flatfs) for an overview of this module's +functionality. + +### DiskUsage and Accuracy + +This datastore implements the [`PersistentDatastore`](https://godoc.org/github.com/ipfs/go-datastore#PersistentDatastore) interface. It offers a `DiskUsage()` method which strives to find a balance between accuracy and performance. This implies: + +* The total disk usage of a datastore is calculated when opening the datastore +* The current disk usage is cached frequently in a file in the datastore root (`diskUsage.cache` by default). This file is also +written when the datastore is closed. +* If this file is not present when the datastore is opened: + * The disk usage will be calculated by walking the datastore's directory tree and estimating the size of each folder. 
+ * This may be a very slow operation for huge datastores or datastores with slow disks + * The operation is time-limited (5 minutes by default). + * Upon timeout, the remaining folders will be assumed to have the average of the previously processed ones. +* After opening, the disk usage is updated in every write/delete operation. + +This means that for certain datastores (huge ones, those with very slow disks or special content), the values reported by +`DiskUsage()` might be reduced accuracy and the first startup (without a `diskUsage.cache` file present), might be slow. + +If you need increased accuracy or a fast start from the first time, you can manually create or update the +`diskUsage.cache` file. + +The file `diskUsage.cache` is a JSON file with two fields `diskUsage` and `accuracy`. For example the JSON file for a +small repo might be: + +``` +{"diskUsage":6357,"accuracy":"initial-exact"} +``` + +`diskUsage` is the calculated disk usage and `accuracy` is a note on the accuracy of the initial calculation. If the +initial calculation was accurate the file will contain the value `initial-exact`. If some of the directories have too +many entries and the disk usage for that directory was estimated based on the first 2000 entries, the file will contain +`initial-approximate`. If the calculation took too long and timed out as indicated above, the file will contain +`initial-timed-out`. + +If the initial calculation timed out the JSON file might be: +``` +{"diskUsage":7589482442898,"accuracy":"initial-timed-out"} + +``` + +To fix this with a more accurate value you could do (in the datastore root): + + $ du -sb . + 7536515831332 . + $ echo -n '{"diskUsage":7536515831332,"accuracy":"initial-exact"}' > diskUsage.cache + +## Contribute + +PRs accepted. + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Protocol Labs, Inc. diff --git a/vendor/github.com/ipfs/go-ds-flatfs/codecov.yml b/vendor/github.com/ipfs/go-ds-flatfs/codecov.yml new file mode 100644 index 00000000..db247200 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/codecov.yml @@ -0,0 +1 @@ +comment: off diff --git a/vendor/github.com/ipfs/go-ds-flatfs/convert.go b/vendor/github.com/ipfs/go-ds-flatfs/convert.go new file mode 100644 index 00000000..308a03a9 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/convert.go @@ -0,0 +1,182 @@ +// Package flatfs is a Datastore implementation that stores all +// objects in a two-level directory structure in the local file +// system, regardless of the hierarchy of the keys. 
+package flatfs + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" +) + +func UpgradeV0toV1(path string, prefixLen int) error { + fun := Prefix(prefixLen) + err := WriteShardFunc(path, fun) + if err != nil { + return err + } + err = WriteReadme(path, fun) + if err != nil { + return err + } + return nil +} + +func DowngradeV1toV0(path string) error { + fun, err := ReadShardFunc(path) + if err != nil { + return err + } else if fun.funName != "prefix" { + return fmt.Errorf("%s: can only downgrade datastore that use the 'prefix' sharding function", path) + } + + err = os.Remove(filepath.Join(path, SHARDING_FN)) + if err != nil { + return err + } + err = os.Remove(filepath.Join(path, README_FN)) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func Move(oldPath string, newPath string, out io.Writer) error { + oldDS, err := Open(oldPath, false) + if err != nil { + return fmt.Errorf("%s: %v", oldPath, err) + } + oldDS.deactivate() + newDS, err := Open(newPath, false) + if err != nil { + return fmt.Errorf("%s: %v", newPath, err) + } + newDS.deactivate() + + res, err := oldDS.Query(query.Query{KeysOnly: true}) + if err != nil { + return err + } + + if out != nil { + fmt.Fprintf(out, "Moving Keys...\n") + } + + // first move the keys + count := 0 + for { + e, ok := res.NextSync() + if !ok { + break + } + if e.Error != nil { + return e.Error + } + + err := moveKey(oldDS, newDS, datastore.RawKey(e.Key)) + if err != nil { + return err + } + + count++ + if out != nil && count%10 == 0 { + fmt.Fprintf(out, "\r%d keys so far", count) + } + } + + if out != nil { + fmt.Fprintf(out, "\nCleaning Up...\n") + } + + // now walk the old top-level directory + dir, err := os.Open(oldDS.path) + if err != nil { + return err + } + defer dir.Close() + names, err := dir.Readdirnames(-1) + if err != nil { + return err + } + for _, fn := range names { + if fn == "." || fn == ".." 
{ + continue + } + oldPath := filepath.Join(oldDS.path, fn) + inf, err := os.Stat(oldPath) + if err != nil { + return err + } + if inf.IsDir() { + indir, err := os.Open(oldPath) + if err != nil { + return err + } + + names, err := indir.Readdirnames(-1) + indir.Close() + if err != nil { + return err + } + + for _, n := range names { + p := filepath.Join(oldPath, n) + // part of unfinished write transaction + // remove it + if strings.HasPrefix(n, "put-") { + err := os.Remove(p) + if err != nil { + return err + } + } else { + return errors.New("unknown file in flatfs: " + p) + } + } + + err = os.Remove(oldPath) + if err != nil { + return err + } + } else if fn == SHARDING_FN || fn == README_FN { + // generated file so just remove it + err := os.Remove(oldPath) + if err != nil { + return err + } + } else { + // else we found something unexpected, so to be safe just move it + log.Warningf("found unexpected file in datastore directory: \"%s\", moving anyway\n", fn) + newPath := filepath.Join(newDS.path, fn) + err := os.Rename(oldPath, newPath) + if err != nil { + return err + } + } + } + + if out != nil { + fmt.Fprintf(out, "All Done.\n") + } + + return nil +} + +func moveKey(oldDS *Datastore, newDS *Datastore, key datastore.Key) error { + _, oldPath := oldDS.encode(key) + dir, newPath := newDS.encode(key) + err := os.Mkdir(dir, 0755) + if err != nil && !os.IsExist(err) { + return err + } + err = os.Rename(oldPath, newPath) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/ipfs/go-ds-flatfs/flatfs.go b/vendor/github.com/ipfs/go-ds-flatfs/flatfs.go new file mode 100644 index 00000000..4afebabf --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/flatfs.go @@ -0,0 +1,1101 @@ +// Package flatfs is a Datastore implementation that stores all +// objects in a two-level directory structure in the local file +// system, regardless of the hierarchy of the keys. +package flatfs + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math" + "math/rand" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/jbenet/goprocess" + + logging "github.com/ipfs/go-log" +) + +var log = logging.Logger("flatfs") + +const ( + extension = ".data" + diskUsageMessageTimeout = 5 * time.Second + diskUsageCheckpointPercent = 1.0 + diskUsageCheckpointTimeout = 2 * time.Second +) + +var ( + // DiskUsageFile is the name of the file to cache the size of the + // datastore in disk + DiskUsageFile = "diskUsage.cache" + // DiskUsageFilesAverage is the maximum number of files per folder + // to stat in order to calculate the size of the datastore. + // The size of the rest of the files in a folder will be assumed + // to be the average of the values obtained. This includes + // regular files and directories. + DiskUsageFilesAverage = 2000 + // DiskUsageCalcTimeout is the maximum time to spend + // calculating the DiskUsage upon a start when no + // DiskUsageFile is present. + // If this period did not suffice to read the size of the datastore, + // the remaining sizes will be stimated. 
+ DiskUsageCalcTimeout = 5 * time.Minute +) + +const ( + opPut = iota + opDelete + opRename +) + +type initAccuracy string + +const ( + unknownA initAccuracy = "unknown" + exactA initAccuracy = "initial-exact" + approxA initAccuracy = "initial-approximate" + timedoutA initAccuracy = "initial-timed-out" +) + +func combineAccuracy(a, b initAccuracy) initAccuracy { + if a == unknownA || b == unknownA { + return unknownA + } + if a == timedoutA || b == timedoutA { + return timedoutA + } + if a == approxA || b == approxA { + return approxA + } + if a == exactA && b == exactA { + return exactA + } + if a == "" { + return b + } + if b == "" { + return a + } + return unknownA +} + +var _ datastore.Datastore = (*Datastore)(nil) + +var ( + ErrDatastoreExists = errors.New("datastore already exists") + ErrDatastoreDoesNotExist = errors.New("datastore directory does not exist") + ErrShardingFileMissing = fmt.Errorf("%s file not found in datastore", SHARDING_FN) + ErrClosed = errors.New("datastore closed") +) + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) +} + +// Datastore implements the go-datastore Interface. +// Note this datastore cannot guarantee order of concurrent +// write operations to the same key. See the explanation in +// Put(). +type Datastore struct { + // atmoic operations should always be used with diskUsage. + // Must be first in struct to ensure correct alignment + // (see https://golang.org/pkg/sync/atomic/#pkg-note-BUG) + diskUsage int64 + + path string + + shardStr string + getDir ShardFunc + + // sychronize all writes and directory changes for added safety + sync bool + + // these values should only be used during internalization or + // inside the checkpoint loop + dirty bool + storedValue diskUsageValue + + // Used to trigger a checkpoint. + checkpointCh chan struct{} + done chan struct{} + + shutdownLock sync.RWMutex + shutdown bool + + // opMap handles concurrent write operations (put/delete) + // to the same key + opMap *opMap +} + +type diskUsageValue struct { + DiskUsage int64 `json:"diskUsage"` + Accuracy initAccuracy `json:"accuracy"` +} + +type ShardFunc func(string) string + +type opT int + +// op wraps useful arguments of write operations +type op struct { + typ opT // operation type + key datastore.Key // datastore key. Mandatory. + tmp string // temp file path + path string // file path + v []byte // value +} + +type opMap struct { + ops sync.Map +} + +type opResult struct { + mu sync.RWMutex + success bool + + opMap *opMap + name string +} + +// Returns nil if there's nothing to do. 
+func (m *opMap) Begin(name string) *opResult { + for { + myOp := &opResult{opMap: m, name: name} + myOp.mu.Lock() + opIface, loaded := m.ops.LoadOrStore(name, myOp) + if !loaded { // no one else doing ops with this key + return myOp + } + + op := opIface.(*opResult) + // someone else doing ops with this key, wait for + // the result + op.mu.RLock() + if op.success { + return nil + } + + // if we are here, we will retry the operation + } +} + +func (o *opResult) Finish(ok bool) { + o.success = ok + o.opMap.ops.Delete(o.name) + o.mu.Unlock() +} + +func Create(path string, fun *ShardIdV1) error { + + err := os.Mkdir(path, 0755) + if err != nil && !os.IsExist(err) { + return err + } + + dsFun, err := ReadShardFunc(path) + switch err { + case ErrShardingFileMissing: + isEmpty, err := DirIsEmpty(path) + if err != nil { + return err + } + if !isEmpty { + return fmt.Errorf("directory missing %s file: %s", SHARDING_FN, path) + } + + err = WriteShardFunc(path, fun) + if err != nil { + return err + } + err = WriteReadme(path, fun) + return err + case nil: + if fun.String() != dsFun.String() { + return fmt.Errorf("specified shard func '%s' does not match repo shard func '%s'", + fun.String(), dsFun.String()) + } + return ErrDatastoreExists + default: + return err + } +} + +func Open(path string, syncFiles bool) (*Datastore, error) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return nil, ErrDatastoreDoesNotExist + } else if err != nil { + return nil, err + } + + shardId, err := ReadShardFunc(path) + if err != nil { + return nil, err + } + + fs := &Datastore{ + path: path, + shardStr: shardId.String(), + getDir: shardId.Func(), + sync: syncFiles, + checkpointCh: make(chan struct{}, 1), + done: make(chan struct{}), + diskUsage: 0, + opMap: new(opMap), + } + + // This sets diskUsage to the correct value + // It might be slow, but allowing it to happen + // while the datastore is usable might + // cause diskUsage to not be accurate. + err = fs.calculateDiskUsage() + if err != nil { + // Cannot stat() all + // elements in the datastore. + return nil, err + } + + go fs.checkpointLoop() + return fs, nil +} + +// convenience method +func CreateOrOpen(path string, fun *ShardIdV1, sync bool) (*Datastore, error) { + err := Create(path, fun) + if err != nil && err != ErrDatastoreExists { + return nil, err + } + return Open(path, sync) +} + +func (fs *Datastore) ShardStr() string { + return fs.shardStr +} + +func (fs *Datastore) encode(key datastore.Key) (dir, file string) { + noslash := key.String()[1:] + dir = filepath.Join(fs.path, fs.getDir(noslash)) + file = filepath.Join(dir, noslash+extension) + return dir, file +} + +func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) { + if filepath.Ext(file) != extension { + return datastore.Key{}, false + } + name := file[:len(file)-len(extension)] + return datastore.NewKey(name), true +} + +func (fs *Datastore) makeDir(dir string) error { + if err := fs.makeDirNoSync(dir); err != nil { + return err + } + + // In theory, if we create a new prefix dir and add a file to + // it, the creation of the prefix dir itself might not be + // durable yet. Sync the root dir after a successful mkdir of + // a prefix dir, just to be paranoid. + if fs.sync { + if err := syncDir(fs.path); err != nil { + return err + } + } + return nil +} + +func (fs *Datastore) makeDirNoSync(dir string) error { + if err := os.Mkdir(dir, 0755); err != nil { + // EEXIST is safe to ignore here, that just means the prefix + // directory already existed. 
+		if !os.IsExist(err) {
+			return err
+		}
+		return nil
+	}
+
+	// Track DiskUsage of this NEW folder
+	fs.updateDiskUsage(dir, true)
+	return nil
+}
+
+// This function always runs under an opLock. Therefore, only one thread is
+// touching the affected files.
+func (fs *Datastore) renameAndUpdateDiskUsage(tmpPath, path string) error {
+	fi, err := os.Stat(path)
+
+	// Destination exists, we need to discount it from diskUsage
+	if fs != nil && err == nil {
+		atomic.AddInt64(&fs.diskUsage, -fi.Size())
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	// Rename and add new file's diskUsage. If the rename fails,
+	// it will either a) Re-add the size of an existing file, which
+	// was subtracted before b) Add 0 if there is no existing file.
+	err = os.Rename(tmpPath, path)
+	fs.updateDiskUsage(path, true)
+	return err
+}
+
+var putMaxRetries = 6
+
+// Put stores a key/value in the datastore.
+//
+// Note, that we do not guarantee order of write operations (Put or Delete)
+// to the same key in this datastore.
+//
+// For example, in the case of two concurrent Puts, we only guarantee
+// that one of them will come through, but cannot assure which one even if
+// one arrived slightly later than the other. In the case of a
+// concurrent Put and a Delete operation, we cannot guarantee which one
+// will win.
+func (fs *Datastore) Put(key datastore.Key, value []byte) error {
+	fs.shutdownLock.RLock()
+	defer fs.shutdownLock.RUnlock()
+	if fs.shutdown {
+		return ErrClosed
+	}
+
+	var err error
+	for i := 1; i <= putMaxRetries; i++ {
+		err = fs.doWriteOp(&op{
+			typ: opPut,
+			key: key,
+			v:   value,
+		})
+		if err == nil {
+			break
+		}
+
+		if !strings.Contains(err.Error(), "too many open files") {
+			break
+		}
+
+		log.Errorf("too many open files, retrying in %dms", 100*i)
+		time.Sleep(time.Millisecond * 100 * time.Duration(i))
+	}
+	return err
+}
+
+func (fs *Datastore) doOp(oper *op) error {
+	switch oper.typ {
+	case opPut:
+		return fs.doPut(oper.key, oper.v)
+	case opDelete:
+		return fs.doDelete(oper.key)
+	case opRename:
+		return fs.renameAndUpdateDiskUsage(oper.tmp, oper.path)
+	default:
+		panic("bad operation, this is a bug")
+	}
+}
+
+// doWriteOp optimizes out write operations (put/delete) to the same
+// key by queueing them and succeeding all queued
+// operations if one of them does. In such case,
+// we assume that the first succeeding operation
+// on that key was the last one to happen after
+// all successful others.
+func (fs *Datastore) doWriteOp(oper *op) error {
+	keyStr := oper.key.String()
+
+	opRes := fs.opMap.Begin(keyStr)
+	if opRes == nil { // nothing to do, a concurrent op succeeded
+		return nil
+	}
+
+	// Do the operation
+	err := fs.doOp(oper)
+
+	// Finish it. If no error, it will signal other operations
+	// waiting on this result to succeed. Otherwise, they will
+	// retry.
+ opRes.Finish(err == nil) + return err +} + +func (fs *Datastore) doPut(key datastore.Key, val []byte) error { + + dir, path := fs.encode(key) + if err := fs.makeDir(dir); err != nil { + return err + } + + tmp, err := ioutil.TempFile(dir, "put-") + if err != nil { + return err + } + closed := false + removed := false + defer func() { + if !closed { + // silence errcheck + _ = tmp.Close() + } + if !removed { + // silence errcheck + _ = os.Remove(tmp.Name()) + } + }() + + if _, err := tmp.Write(val); err != nil { + return err + } + if fs.sync { + if err := syncFile(tmp); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + closed = true + + err = fs.renameAndUpdateDiskUsage(tmp.Name(), path) + if err != nil { + return err + } + removed = true + + if fs.sync { + if err := syncDir(dir); err != nil { + return err + } + } + return nil +} + +func (fs *Datastore) putMany(data map[datastore.Key][]byte) error { + fs.shutdownLock.RLock() + defer fs.shutdownLock.RUnlock() + if fs.shutdown { + return ErrClosed + } + + var dirsToSync []string + + files := make(map[*os.File]*op, len(data)) + ops := make(map[*os.File]int, len(data)) + + defer func() { + for fi := range files { + val, _ := ops[fi] + switch val { + case 0: + _ = fi.Close() + fallthrough + case 1: + _ = os.Remove(fi.Name()) + } + } + }() + + for key, value := range data { + dir, path := fs.encode(key) + if err := fs.makeDirNoSync(dir); err != nil { + return err + } + dirsToSync = append(dirsToSync, dir) + + tmp, err := ioutil.TempFile(dir, "put-") + if err != nil { + return err + } + + if _, err := tmp.Write(value); err != nil { + return err + } + + files[tmp] = &op{ + typ: opRename, + path: path, + tmp: tmp.Name(), + key: key, + } + } + + // Now we sync everything + // sync and close files + for fi := range files { + if fs.sync { + if err := syncFile(fi); err != nil { + return err + } + } + + if err := fi.Close(); err != nil { + return err + } + + // signify closed + ops[fi] = 1 + } + + // move files to their proper places + for fi, op := range files { + err := fs.doWriteOp(op) + if err != nil { + return err + } + // signify removed + ops[fi] = 2 + } + + // now sync the dirs for those files + if fs.sync { + for _, dir := range dirsToSync { + if err := syncDir(dir); err != nil { + return err + } + } + + // sync top flatfs dir + if err := syncDir(fs.path); err != nil { + return err + } + } + + return nil +} + +func (fs *Datastore) Get(key datastore.Key) (value []byte, err error) { + _, path := fs.encode(key) + data, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, datastore.ErrNotFound + } + // no specific error to return, so just pass it through + return nil, err + } + return data, nil +} + +func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) { + _, path := fs.encode(key) + switch _, err := os.Stat(path); { + case err == nil: + return true, nil + case os.IsNotExist(err): + return false, nil + default: + return false, err + } +} + +func (fs *Datastore) GetSize(key datastore.Key) (size int, err error) { + _, path := fs.encode(key) + switch s, err := os.Stat(path); { + case err == nil: + return int(s.Size()), nil + case os.IsNotExist(err): + return -1, datastore.ErrNotFound + default: + return -1, err + } +} + +// Delete removes a key/value from the Datastore. Please read +// the Put() explanation about the handling of concurrent write +// operations to the same key. 
+func (fs *Datastore) Delete(key datastore.Key) error { + fs.shutdownLock.RLock() + defer fs.shutdownLock.RUnlock() + if fs.shutdown { + return ErrClosed + } + + return fs.doWriteOp(&op{ + typ: opDelete, + key: key, + v: nil, + }) +} + +// This function always runs within an opLock for the given +// key, and not concurrently. +func (fs *Datastore) doDelete(key datastore.Key) error { + _, path := fs.encode(key) + + fSize := fileSize(path) + + switch err := os.Remove(path); { + case err == nil: + atomic.AddInt64(&fs.diskUsage, -fSize) + fs.checkpointDiskUsage() + return nil + case os.IsNotExist(err): + return datastore.ErrNotFound + default: + return err + } +} + +func (fs *Datastore) Query(q query.Query) (query.Results, error) { + if (q.Prefix != "" && q.Prefix != "/") || + len(q.Filters) > 0 || + len(q.Orders) > 0 || + q.Limit > 0 || + q.Offset > 0 || + !q.KeysOnly { + // TODO this is overly simplistic, but the only caller is + // `ipfs refs local` for now, and this gets us moving. + return nil, errors.New("flatfs only supports listing all keys in random order") + } + + // Replicates the logic in ResultsWithChan but actually respects calls + // to `Close`. + b := query.NewResultBuilder(q) + b.Process.Go(func(p goprocess.Process) { + err := fs.walkTopLevel(fs.path, b) + if err == nil { + return + } + select { + case b.Output <- query.Result{Error: errors.New("walk failed: " + err.Error())}: + case <-p.Closing(): + } + }) + go b.Process.CloseAfterChildren() + + return b.Results(), nil +} + +func (fs *Datastore) walkTopLevel(path string, result *query.ResultBuilder) error { + dir, err := os.Open(path) + if err != nil { + return err + } + defer dir.Close() + names, err := dir.Readdirnames(-1) + if err != nil { + return err + } + for _, dir := range names { + if len(dir) == 0 || dir[0] == '.' { + continue + } + + err = fs.walk(filepath.Join(path, dir), result) + if err != nil { + return err + } + + // Are we closing? + select { + case <-result.Process.Closing(): + return nil + default: + } + } + return nil +} + +// folderSize estimates the diskUsage of a folder by reading +// up to DiskUsageFilesAverage entries in it and assumming any +// other files will have an avereage size. 
+func folderSize(path string, deadline time.Time) (int64, initAccuracy, error) { + var du int64 + + folder, err := os.Open(path) + if err != nil { + return 0, "", err + } + defer folder.Close() + + stat, err := folder.Stat() + if err != nil { + return 0, "", err + } + + files, err := folder.Readdirnames(-1) + if err != nil { + return 0, "", err + } + + totalFiles := len(files) + i := 0 + filesProcessed := 0 + maxFiles := DiskUsageFilesAverage + if maxFiles <= 0 { + maxFiles = totalFiles + } + + // randomize file order + // https://stackoverflow.com/a/42776696 + for i := len(files) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + files[i], files[j] = files[j], files[i] + } + + accuracy := exactA + for { + // Do not process any files after deadline is over + if time.Now().After(deadline) { + accuracy = timedoutA + break + } + + if i >= totalFiles || filesProcessed >= maxFiles { + if filesProcessed >= maxFiles { + accuracy = approxA + } + break + } + + // Stat the file + fname := files[i] + subpath := filepath.Join(path, fname) + st, err := os.Stat(subpath) + if err != nil { + return 0, "", err + } + + // Find folder size recursively + if st.IsDir() { + du2, acc, err := folderSize(filepath.Join(subpath), deadline) + if err != nil { + return 0, "", err + } + accuracy = combineAccuracy(acc, accuracy) + du += du2 + filesProcessed++ + } else { // in any other case, add the file size + du += st.Size() + filesProcessed++ + } + + i++ + } + + nonProcessed := totalFiles - filesProcessed + + // Avg is total size in this folder up to now / total files processed + // it includes folders ant not folders + avg := 0.0 + if filesProcessed > 0 { + avg = float64(du) / float64(filesProcessed) + } + duEstimation := int64(avg * float64(nonProcessed)) + du += duEstimation + du += stat.Size() + //fmt.Println(path, "total:", totalFiles, "totalStat:", i, "totalFile:", filesProcessed, "left:", nonProcessed, "avg:", int(avg), "est:", int(duEstimation), "du:", du) + return du, accuracy, nil +} + +// calculateDiskUsage tries to read the DiskUsageFile for a cached +// diskUsage value, otherwise walks the datastore files. +// it is only safe to call in Open() +func (fs *Datastore) calculateDiskUsage() error { + // Try to obtain a previously stored value from disk + if persDu := fs.readDiskUsageFile(); persDu > 0 { + fs.diskUsage = persDu + return nil + } + + msgDone := make(chan struct{}, 1) // prevent race condition + msgTimer := time.AfterFunc(diskUsageMessageTimeout, func() { + fmt.Printf("Calculating datastore size. This might take %s at most and will happen only once\n", + DiskUsageCalcTimeout.String()) + msgDone <- struct{}{} + }) + defer msgTimer.Stop() + deadline := time.Now().Add(DiskUsageCalcTimeout) + du, accuracy, err := folderSize(fs.path, deadline) + if err != nil { + return err + } + if !msgTimer.Stop() { + <-msgDone + } + if accuracy == timedoutA { + fmt.Println("WARN: It took to long to calculate the datastore size") + fmt.Printf("WARN: The total size (%d) is an estimation. 
You can fix errors by\n", du) + fmt.Printf("WARN: replacing the %s file with the right disk usage in bytes and\n", + filepath.Join(fs.path, DiskUsageFile)) + fmt.Println("WARN: re-opening the datastore") + } + + fs.storedValue.Accuracy = accuracy + fs.diskUsage = du + fs.writeDiskUsageFile(du, true) + + return nil +} + +func fileSize(path string) int64 { + fi, err := os.Stat(path) + if err != nil { + return 0 + } + return fi.Size() +} + +// updateDiskUsage reads the size of path and atomically +// increases or decreases the diskUsage variable. +// setting add to false will subtract from disk usage. +func (fs *Datastore) updateDiskUsage(path string, add bool) { + fsize := fileSize(path) + if !add { + fsize = -fsize + } + + if fsize != 0 { + atomic.AddInt64(&fs.diskUsage, fsize) + fs.checkpointDiskUsage() + } +} + +func (fs *Datastore) checkpointDiskUsage() { + select { + case fs.checkpointCh <- struct{}{}: + // msg sent + default: + // checkpoint request already pending + } +} + +func (fs *Datastore) checkpointLoop() { + defer close(fs.done) + + timerActive := true + timer := time.NewTimer(0) + defer timer.Stop() + for { + select { + case _, more := <-fs.checkpointCh: + du := atomic.LoadInt64(&fs.diskUsage) + fs.dirty = true + if !more { // shutting down + fs.writeDiskUsageFile(du, true) + if fs.dirty { + log.Errorf("could not store final value of disk usage to file, future estimates may be inaccurate") + } + return + } + // If the difference between the checkpointed disk usage and + // current one is larger than than `diskUsageCheckpointPercent` + // of the checkpointed: store it. + newDu := float64(du) + lastCheckpointDu := float64(fs.storedValue.DiskUsage) + diff := math.Abs(newDu - lastCheckpointDu) + if lastCheckpointDu*diskUsageCheckpointPercent < diff*100.0 { + fs.writeDiskUsageFile(du, false) + } + // Otherwise insure the value will be written to disk after + // `diskUsageCheckpointTimeout` + if fs.dirty && !timerActive { + timer.Reset(diskUsageCheckpointTimeout) + timerActive = true + } + case <-timer.C: + timerActive = false + if fs.dirty { + du := atomic.LoadInt64(&fs.diskUsage) + fs.writeDiskUsageFile(du, false) + } + } + } +} + +func (fs *Datastore) writeDiskUsageFile(du int64, doSync bool) { + tmp, err := ioutil.TempFile(fs.path, "du-") + if err != nil { + log.Warningf("cound not write disk usage: %v", err) + return + } + + removed := false + defer func() { + if !removed { + // silence errcheck + _ = os.Remove(tmp.Name()) + } + }() + + toWrite := fs.storedValue + toWrite.DiskUsage = du + encoder := json.NewEncoder(tmp) + if err := encoder.Encode(&toWrite); err != nil { + log.Warningf("cound not write disk usage: %v", err) + return + } + + if doSync { + if err := tmp.Sync(); err != nil { + log.Warningf("cound not sync %s: %v", DiskUsageFile, err) + return + } + } + + if err := tmp.Close(); err != nil { + log.Warningf("cound not write disk usage: %v", err) + return + } + + if err := os.Rename(tmp.Name(), filepath.Join(fs.path, DiskUsageFile)); err != nil { + log.Warningf("cound not write disk usage: %v", err) + return + } + removed = true + + fs.storedValue = toWrite + fs.dirty = false +} + +// readDiskUsageFile is only safe to call in Open() +func (fs *Datastore) readDiskUsageFile() int64 { + fpath := filepath.Join(fs.path, DiskUsageFile) + duB, err := ioutil.ReadFile(fpath) + if err != nil { + return 0 + } + err = json.Unmarshal(duB, &fs.storedValue) + if err != nil { + return 0 + } + return fs.storedValue.DiskUsage +} + +// DiskUsage implements the PersistentDatastore 
interface +// and returns the current disk usage in bytes used by +// this datastore. +// +// The size is approximative and may slightly differ from +// the real disk values. +func (fs *Datastore) DiskUsage() (uint64, error) { + // it may differ from real disk values if + // the filesystem has allocated for blocks + // for a directory because it has many files in it + // we don't account for "resized" directories. + // In a large datastore, the differences should be + // are negligible though. + + du := atomic.LoadInt64(&fs.diskUsage) + return uint64(du), nil +} + +// Accuracy returns a string representing the accuracy of the +// DiskUsage() result, the value returned is implementation defined +// and for informational purposes only +func (fs *Datastore) Accuracy() string { + return string(fs.storedValue.Accuracy) +} + +func (fs *Datastore) walk(path string, result *query.ResultBuilder) error { + dir, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + // not an error if the file disappeared + return nil + } + return err + } + defer dir.Close() + + // ignore non-directories + fileInfo, err := dir.Stat() + if err != nil { + return err + } + if !fileInfo.IsDir() { + return nil + } + + names, err := dir.Readdirnames(-1) + if err != nil { + return err + } + for _, fn := range names { + + if len(fn) == 0 || fn[0] == '.' { + continue + } + + key, ok := fs.decode(fn) + if !ok { + log.Warningf("failed to decode flatfs entry: %s", fn) + continue + } + + select { + case result.Output <- query.Result{ + Entry: query.Entry{ + Key: key.String(), + }, + }: + case <-result.Process.Closing(): + return nil + } + } + return nil +} + +// Deactivate closes background maintenance threads, most write +// operations will fail but readonly operations will continue to +// function +func (fs *Datastore) deactivate() error { + fs.shutdownLock.Lock() + defer fs.shutdownLock.Unlock() + if fs.shutdown { + return nil + } + fs.shutdown = true + close(fs.checkpointCh) + <-fs.done + return nil +} + +func (fs *Datastore) Close() error { + return fs.deactivate() +} + +type flatfsBatch struct { + puts map[datastore.Key][]byte + deletes map[datastore.Key]struct{} + + ds *Datastore +} + +func (fs *Datastore) Batch() (datastore.Batch, error) { + return &flatfsBatch{ + puts: make(map[datastore.Key][]byte), + deletes: make(map[datastore.Key]struct{}), + ds: fs, + }, nil +} + +func (bt *flatfsBatch) Put(key datastore.Key, val []byte) error { + bt.puts[key] = val + return nil +} + +func (bt *flatfsBatch) Delete(key datastore.Key) error { + bt.deletes[key] = struct{}{} + return nil +} + +func (bt *flatfsBatch) Commit() error { + if err := bt.ds.putMany(bt.puts); err != nil { + return err + } + + for k, _ := range bt.deletes { + if err := bt.ds.Delete(k); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/ipfs/go-ds-flatfs/go.mod b/vendor/github.com/ipfs/go-ds-flatfs/go.mod new file mode 100644 index 00000000..f2a9e266 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/go.mod @@ -0,0 +1,7 @@ +module github.com/ipfs/go-ds-flatfs + +require ( + github.com/ipfs/go-datastore v0.0.1 + github.com/ipfs/go-log v0.0.1 + github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 +) diff --git a/vendor/github.com/ipfs/go-ds-flatfs/go.sum b/vendor/github.com/ipfs/go-ds-flatfs/go.sum new file mode 100644 index 00000000..c5e35f8a --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/go.sum @@ -0,0 +1,42 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= 
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-ds-flatfs/package.json b/vendor/github.com/ipfs/go-ds-flatfs/package.json new file mode 100644 index 00000000..e8bd0dbd --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/package.json @@ -0,0 +1,35 @@ +{ + "author": "whyrusleeping", + "bugs": { + "url": "https://github.com/ipfs/go-ds-flatfs" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-ds-flatfs" + }, + "gxDependencies": [ + { + "hash": "QmbkT7eMTyXfpeyB3ZMxxcxg7XH8t6uXp49jqzz4HB7BGF", + "name": "go-log", + "version": "1.5.9" + }, + { + "author": "jbenet", + "hash": "QmUadX5EcvrBmxAV9sE7wUWtWSqxns5K84qKJBixmcT1w9", + "name": "go-datastore", + "version": "3.6.1" + }, + { + "author": "whyrusleeping", + "hash": "QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP", + "name": "goprocess", + "version": "1.0.0" + } + ], + "gxVersion": "0.8.0", + "language": "go", + "license": "", + "name": "go-ds-flatfs", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.3.7" +} + diff --git a/vendor/github.com/ipfs/go-ds-flatfs/readme.go b/vendor/github.com/ipfs/go-ds-flatfs/readme.go new file mode 100644 index 00000000..9c1065bd --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/readme.go @@ -0,0 +1,33 @@ +package flatfs + +var README_IPFS_DEF_SHARD = `This is a repository of IPLD objects. Each IPLD object is in a single file, +named .data. Where is the +"base32" encoding of the CID (as specified in +https://github.com/multiformats/multibase) without the 'B' prefix. +All the object files are placed in a tree of directories, based on a +function of the CID. This is a form of sharding similar to +the objects directory in git repositories. Previously, we used +prefixes, we now use the next-to-last two charters. + + func NextToLast(base32cid string) { + nextToLastLen := 2 + offset := len(base32cid) - nextToLastLen - 1 + return str[offset : offset+nextToLastLen] + } + +For example, an object with a base58 CIDv1 of + + zb2rhYSxw4ZjuzgCnWSt19Q94ERaeFhu9uSqRgjSdx9bsgM6f + +has a base32 CIDv1 of + + BAFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA + +and will be placed at + + SC/AFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA.data + +with 'SC' being the last-to-next two characters and the 'B' at the +beginning of the CIDv1 string is the multibase prefix that is not +stored in the filename. 
+` diff --git a/vendor/github.com/ipfs/go-ds-flatfs/shard.go b/vendor/github.com/ipfs/go-ds-flatfs/shard.go new file mode 100644 index 00000000..acfa4505 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/shard.go @@ -0,0 +1,145 @@ +package flatfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" +) + +var IPFS_DEF_SHARD = NextToLast(2) +var IPFS_DEF_SHARD_STR = IPFS_DEF_SHARD.String() + +const PREFIX = "/repo/flatfs/shard/" + +const SHARDING_FN = "SHARDING" +const README_FN = "_README" + +type ShardIdV1 struct { + funName string + param int + fun ShardFunc +} + +func (f *ShardIdV1) String() string { + return fmt.Sprintf("%sv1/%s/%d", PREFIX, f.funName, f.param) +} + +func (f *ShardIdV1) Func() ShardFunc { + return f.fun +} + +func Prefix(prefixLen int) *ShardIdV1 { + padding := strings.Repeat("_", prefixLen) + return &ShardIdV1{ + funName: "prefix", + param: prefixLen, + fun: func(noslash string) string { + return (noslash + padding)[:prefixLen] + }, + } +} + +func Suffix(suffixLen int) *ShardIdV1 { + padding := strings.Repeat("_", suffixLen) + return &ShardIdV1{ + funName: "suffix", + param: suffixLen, + fun: func(noslash string) string { + str := padding + noslash + return str[len(str)-suffixLen:] + }, + } +} + +func NextToLast(suffixLen int) *ShardIdV1 { + padding := strings.Repeat("_", suffixLen+1) + return &ShardIdV1{ + funName: "next-to-last", + param: suffixLen, + fun: func(noslash string) string { + str := padding + noslash + offset := len(str) - suffixLen - 1 + return str[offset : offset+suffixLen] + }, + } +} + +func ParseShardFunc(str string) (*ShardIdV1, error) { + str = strings.TrimSpace(str) + + if len(str) == 0 { + return nil, fmt.Errorf("empty shard identifier") + } + + trimmed := strings.TrimPrefix(str, PREFIX) + if str == trimmed { // nothing trimmed + return nil, fmt.Errorf("invalid or no prefix in shard identifier: %s", str) + } + str = trimmed + + parts := strings.Split(str, "/") + if len(parts) != 3 { + return nil, fmt.Errorf("invalid shard identifier: %s", str) + } + + version := parts[0] + if version != "v1" { + return nil, fmt.Errorf("expected 'v1' for version string got: %s\n", version) + } + + funName := parts[1] + + param, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("invalid parameter: %v", err) + } + + switch funName { + case "prefix": + return Prefix(param), nil + case "suffix": + return Suffix(param), nil + case "next-to-last": + return NextToLast(param), nil + default: + return nil, fmt.Errorf("expected 'prefix', 'suffix' or 'next-to-last' got: %s", funName) + } + +} + +func ReadShardFunc(dir string) (*ShardIdV1, error) { + buf, err := ioutil.ReadFile(filepath.Join(dir, SHARDING_FN)) + if os.IsNotExist(err) { + return nil, ErrShardingFileMissing + } else if err != nil { + return nil, err + } + return ParseShardFunc(string(buf)) +} + +func WriteShardFunc(dir string, id *ShardIdV1) error { + file, err := os.OpenFile(filepath.Join(dir, SHARDING_FN), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return err + } + defer file.Close() + _, err = file.WriteString(id.String()) + if err != nil { + return err + } + _, err = file.WriteString("\n") + return err +} + +func WriteReadme(dir string, id *ShardIdV1) error { + if id.String() == IPFS_DEF_SHARD.String() { + err := ioutil.WriteFile(filepath.Join(dir, README_FN), []byte(README_IPFS_DEF_SHARD), 0444) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/ipfs/go-ds-flatfs/sync.go 
b/vendor/github.com/ipfs/go-ds-flatfs/sync.go new file mode 100644 index 00000000..bf38c16f --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/sync.go @@ -0,0 +1,42 @@ +package flatfs + +import ( + "os" + "runtime" +) + +// don't block more than 16 threads on sync opearation +// 16 should be able to sataurate most RAIDs +// in case of two used disks per write (RAID 1, 5) and queue depth of 2, +// 16 concurrent Sync calls should be able to saturate 16 HDDs RAID +//TODO: benchmark it out, maybe provide tweak parmeter +const SyncThreadsMax = 16 + +var syncSemaphore chan struct{} = make(chan struct{}, SyncThreadsMax) + +func syncDir(dir string) error { + if runtime.GOOS == "windows" { + // dir sync on windows doesn't work: https://git.io/vPnCI + return nil + } + + dirF, err := os.Open(dir) + if err != nil { + return err + } + defer dirF.Close() + + syncSemaphore <- struct{}{} + defer func() { <-syncSemaphore }() + + if err := dirF.Sync(); err != nil { + return err + } + return nil +} + +func syncFile(file *os.File) error { + syncSemaphore <- struct{}{} + defer func() { <-syncSemaphore }() + return file.Sync() +} diff --git a/vendor/github.com/ipfs/go-ds-flatfs/util.go b/vendor/github.com/ipfs/go-ds-flatfs/util.go new file mode 100644 index 00000000..31ebde05 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-flatfs/util.go @@ -0,0 +1,21 @@ +package flatfs + +import ( + "io" + "os" +) + +// From: http://stackoverflow.com/questions/30697324/how-to-check-if-directory-on-path-is-empty +func DirIsEmpty(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) // Or f.Readdir(1) + if err == io.EOF { + return true, nil + } + return false, err // Either not empty or error, suits both cases +} diff --git a/vendor/github.com/ipfs/go-ds-leveldb/.gitignore b/vendor/github.com/ipfs/go-ds-leveldb/.gitignore new file mode 100644 index 00000000..1377554e --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/ipfs/go-ds-leveldb/.travis.yml b/vendor/github.com/ipfs/go-ds-leveldb/.travis.yml new file mode 100644 index 00000000..b86c124d --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race -cpu=5" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ds-leveldb/LICENSE b/vendor/github.com/ipfs/go-ds-leveldb/LICENSE new file mode 100644 index 00000000..6152c321 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2016 Jeromy Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ds-leveldb/Makefile b/vendor/github.com/ipfs/go-ds-leveldb/Makefile new file mode 100644 index 00000000..54152565 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/Makefile @@ -0,0 +1,9 @@ +export IPFS_API ?= v04x.ipfs.io + +gx: + go get -u github.com/whyrusleeping/gx + go get -u github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite diff --git a/vendor/github.com/ipfs/go-ds-leveldb/README.md b/vendor/github.com/ipfs/go-ds-leveldb/README.md new file mode 100644 index 00000000..53515c2f --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/README.md @@ -0,0 +1,47 @@ +# go-ds-leveldb + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ds-leveldb?status.svg)](https://godoc.org/github.com/ipfs/go-ds-leveldb) +[![Build Status](https://travis-ci.org/ipfs/go-ds-leveldb.svg?branch=master)](https://travis-ci.org/ipfs/go-ds-leveldb) + +> A go-datastore implementation using LevelDB + +`go-ds-leveldb` implements the [go-datastore](https://github.com/ipfs/go-datastore) interface using a LevelDB backend. + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Contribute](#contribute) +- [License](#license) + +## Install + +This module can be installed like a regular go module: + +``` +go get github.com/ipfs/go-ds-leveldb +``` + +It uses [Gx](https://github.com/whyrusleeping/gx) to manage dependencies. You can use `make deps` to rewrite imports to the gx-specified versions. + +## Usage + +``` +import "github.com/ipfs/go-ds-leveldb" +``` + +Check the [GoDoc documentation](https://godoc.org/github.com/ipfs/go-ds-leveldb) + + +## Contribute + +PRs accepted. + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Protocol Labs, Inc. 
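As an illustrative sketch of the go-ds-leveldb API vendored in the next hunk (`NewDatastore`, `Put`, `Get`, `Close`), roughly how a caller would exercise it; `ds.NewKey` from `github.com/ipfs/go-datastore` is assumed, and an empty path selects the in-memory backend per `NewDatastore` below:

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
	leveldb "github.com/ipfs/go-ds-leveldb"
)

func main() {
	// Empty path => in-memory backend (see NewDatastore in datastore.go below).
	store, err := leveldb.NewDatastore("", nil)
	if err != nil {
		panic(err)
	}
	defer store.Close()

	key := ds.NewKey("/example")
	if err := store.Put(key, []byte("hello")); err != nil {
		panic(err)
	}
	val, err := store.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // prints "hello"
}
```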
diff --git a/vendor/github.com/ipfs/go-ds-leveldb/datastore.go b/vendor/github.com/ipfs/go-ds-leveldb/datastore.go new file mode 100644 index 00000000..1b2df122 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/datastore.go @@ -0,0 +1,239 @@ +package leveldb + +import ( + "os" + "path/filepath" + + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type Datastore struct { + *accessor + DB *leveldb.DB + path string +} + +var _ ds.Datastore = (*Datastore)(nil) +var _ ds.TxnDatastore = (*Datastore)(nil) + +// Options is an alias of syndtr/goleveldb/opt.Options which might be extended +// in the future. +type Options opt.Options + +// NewDatastore returns a new datastore backed by leveldb +// +// for path == "", an in memory bachend will be chosen +func NewDatastore(path string, opts *Options) (*Datastore, error) { + var nopts opt.Options + if opts != nil { + nopts = opt.Options(*opts) + } + + var err error + var db *leveldb.DB + + if path == "" { + db, err = leveldb.Open(storage.NewMemStorage(), &nopts) + } else { + db, err = leveldb.OpenFile(path, &nopts) + if errors.IsCorrupted(err) && !nopts.GetReadOnly() { + db, err = leveldb.RecoverFile(path, &nopts) + } + } + + if err != nil { + return nil, err + } + + return &Datastore{ + accessor: &accessor{ldb: db}, + DB: db, + path: path, + }, nil +} + +// An extraction of the common interface between LevelDB Transactions and the DB itself. +// +// It allows to plug in either inside the `accessor`. +type levelDbOps interface { + Put(key, value []byte, wo *opt.WriteOptions) error + Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) + Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) + Delete(key []byte, wo *opt.WriteOptions) error + NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator +} + +// Datastore operations using either the DB or a transaction as the backend. +type accessor struct { + ldb levelDbOps +} + +func (a *accessor) Put(key ds.Key, value []byte) (err error) { + return a.ldb.Put(key.Bytes(), value, nil) +} + +func (a *accessor) Get(key ds.Key) (value []byte, err error) { + val, err := a.ldb.Get(key.Bytes(), nil) + if err != nil { + if err == leveldb.ErrNotFound { + return nil, ds.ErrNotFound + } + return nil, err + } + return val, nil +} + +func (a *accessor) Has(key ds.Key) (exists bool, err error) { + return a.ldb.Has(key.Bytes(), nil) +} + +func (d *accessor) GetSize(key ds.Key) (size int, err error) { + return ds.GetBackedSize(d, key) +} + +func (a *accessor) Delete(key ds.Key) (err error) { + // leveldb Delete will not return an error if the key doesn't + // exist (see https://github.com/syndtr/goleveldb/issues/109), + // so check that the key exists first and if not return an + // error + exists, err := a.ldb.Has(key.Bytes(), nil) + if !exists { + return ds.ErrNotFound + } else if err != nil { + return err + } + return a.ldb.Delete(key.Bytes(), nil) +} + +func (a *accessor) Query(q dsq.Query) (dsq.Results, error) { + var rnge *util.Range + + // make a copy of the query for the fallback naive query implementation. + // don't modify the original so res.Query() returns the correct results. 
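+ // The iterator below handles the key prefix and plain key ordering natively; + // remaining query features (other orders, filters, offset, limit) are applied + // afterwards by dsq.NaiveQueryApply over the stripped-down copy qNaive.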
+ qNaive := q + if q.Prefix != "" { + rnge = util.BytesPrefix([]byte(q.Prefix)) + qNaive.Prefix = "" + } + i := a.ldb.NewIterator(rnge, nil) + next := i.Next + if len(q.Orders) > 0 { + switch q.Orders[0].(type) { + case dsq.OrderByKey, *dsq.OrderByKey: + qNaive.Orders = nil + case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending: + next = func() bool { + next = i.Prev + return i.Last() + } + qNaive.Orders = nil + default: + } + } + r := dsq.ResultsFromIterator(q, dsq.Iterator{ + Next: func() (dsq.Result, bool) { + if !next() { + return dsq.Result{}, false + } + k := string(i.Key()) + e := dsq.Entry{Key: k} + + if !q.KeysOnly { + buf := make([]byte, len(i.Value())) + copy(buf, i.Value()) + e.Value = buf + } + return dsq.Result{Entry: e}, true + }, + Close: func() error { + i.Release() + return nil + }, + }) + return dsq.NaiveQueryApply(qNaive, r), nil +} + +// DiskUsage returns the current disk size used by this levelDB. +// For in-mem datastores, it will return 0. +func (d *Datastore) DiskUsage() (uint64, error) { + if d.path == "" { // in-mem + return 0, nil + } + + var du uint64 + + err := filepath.Walk(d.path, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + du += uint64(info.Size()) + return nil + }) + + if err != nil { + return 0, err + } + + return du, nil +} + +// LevelDB needs to be closed. +func (d *Datastore) Close() (err error) { + return d.DB.Close() +} + +type leveldbBatch struct { + b *leveldb.Batch + db *leveldb.DB +} + +func (d *Datastore) Batch() (ds.Batch, error) { + return &leveldbBatch{ + b: new(leveldb.Batch), + db: d.DB, + }, nil +} + +func (b *leveldbBatch) Put(key ds.Key, value []byte) error { + b.b.Put(key.Bytes(), value) + return nil +} + +func (b *leveldbBatch) Commit() error { + return b.db.Write(b.b, nil) +} + +func (b *leveldbBatch) Delete(key ds.Key) error { + b.b.Delete(key.Bytes()) + return nil +} + +// A leveldb transaction embedding the accessor backed by the transaction. 
+type transaction struct { + *accessor + tx *leveldb.Transaction +} + +func (t *transaction) Commit() error { + return t.tx.Commit() +} + +func (t *transaction) Discard() { + t.tx.Discard() +} + +func (d *Datastore) NewTransaction(readOnly bool) (ds.Txn, error) { + tx, err := d.DB.OpenTransaction() + if err != nil { + return nil, err + } + accessor := &accessor{tx} + return &transaction{accessor, tx}, nil +} diff --git a/vendor/github.com/ipfs/go-ds-leveldb/go.mod b/vendor/github.com/ipfs/go-ds-leveldb/go.mod new file mode 100644 index 00000000..d26e1c18 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/go.mod @@ -0,0 +1,6 @@ +module github.com/ipfs/go-ds-leveldb + +require ( + github.com/ipfs/go-datastore v0.0.3 + github.com/syndtr/goleveldb v1.0.0 +) diff --git a/vendor/github.com/ipfs/go-ds-leveldb/go.sum b/vendor/github.com/ipfs/go-ds-leveldb/go.sum new file mode 100644 index 00000000..d6ecf9b4 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-leveldb/go.sum @@ -0,0 +1,46 @@ +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ipfs/go-datastore v0.0.3 h1:/eP3nMDmLzMJNoWSSYvEkmMTTrm9FFCN+JraP9NdlwU= +github.com/ipfs/go-datastore v0.0.3/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/syndtr/goleveldb v1.0.0 
h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ipfs/go-ipfs/core/coredag/cbor.go b/vendor/github.com/ipfs/go-ipfs/core/coredag/cbor.go new file mode 100644 index 00000000..b51a7057 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/core/coredag/cbor.go @@ -0,0 +1,32 @@ +package coredag + +import ( + "io" + "io/ioutil" + + ipldcbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" +) + +func cborJSONParser(r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + nd, err := ipldcbor.FromJSON(r, mhType, mhLen) + if err != nil { + return nil, err + } + + return []ipld.Node{nd}, nil +} + +func cborRawParser(r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nd, err := ipldcbor.Decode(data, mhType, mhLen) + if err != nil { + return nil, err + } + + return []ipld.Node{nd}, nil +} diff --git a/vendor/github.com/ipfs/go-ipfs/core/coredag/dagpb.go b/vendor/github.com/ipfs/go-ipfs/core/coredag/dagpb.go new file mode 100644 index 00000000..0350ec5c --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/core/coredag/dagpb.go @@ -0,0 +1,66 @@ +package coredag + +import ( + "io" + "io/ioutil" + "math" + + "github.com/ipfs/go-merkledag" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" +) + +func dagpbJSONParser(r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nd := &merkledag.ProtoNode{} + + err = nd.UnmarshalJSON(data) + if err != nil { + return nil, err + } + + nd.SetCidBuilder(cidPrefix(mhType, mhLen)) + + return []ipld.Node{nd}, nil +} + +func dagpbRawParser(r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + 
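// Decode the protobuf-encoded dag-pb bytes into a ProtoNode and stamp it with + // a CID prefix built from mhType/mhLen (cidPrefix below falls back to SHA2-256 + // and emits a CIDv0 prefix for that hash). +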
data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nd, err := merkledag.DecodeProtobuf(data) + if err != nil { + return nil, err + } + + nd.SetCidBuilder(cidPrefix(mhType, mhLen)) + + return []ipld.Node{nd}, nil +} + +func cidPrefix(mhType uint64, mhLen int) *cid.Prefix { + if mhType == math.MaxUint64 { + mhType = mh.SHA2_256 + } + + prefix := &cid.Prefix{ + MhType: mhType, + MhLength: mhLen, + Version: 1, + Codec: cid.DagProtobuf, + } + + if mhType == mh.SHA2_256 { + prefix.Version = 0 + } + + return prefix +} diff --git a/vendor/github.com/ipfs/go-ipfs/core/coredag/dagtransl.go b/vendor/github.com/ipfs/go-ipfs/core/coredag/dagtransl.go new file mode 100644 index 00000000..67a79246 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/core/coredag/dagtransl.go @@ -0,0 +1,86 @@ +package coredag + +import ( + "fmt" + "io" + + ipld "github.com/ipfs/go-ipld-format" +) + +// DagParser is function used for parsing stream into Node +type DagParser func(r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) + +// FormatParsers is used for mapping format descriptors to DagParsers +type FormatParsers map[string]DagParser + +// InputEncParsers is used for mapping input encodings to FormatParsers +type InputEncParsers map[string]FormatParsers + +// DefaultInputEncParsers is InputEncParser that is used everywhere +var DefaultInputEncParsers = InputEncParsers{ + "json": defaultJSONParsers, + "raw": defaultRawParsers, + "cbor": defaultCborParsers, + "protobuf": defaultProtobufParsers, +} + +var defaultJSONParsers = FormatParsers{ + "cbor": cborJSONParser, + "dag-cbor": cborJSONParser, + + "protobuf": dagpbJSONParser, + "dag-pb": dagpbJSONParser, +} + +var defaultRawParsers = FormatParsers{ + "cbor": cborRawParser, + "dag-cbor": cborRawParser, + + "protobuf": dagpbRawParser, + "dag-pb": dagpbRawParser, + + "raw": rawRawParser, +} + +var defaultCborParsers = FormatParsers{ + "cbor": cborRawParser, + "dag-cbor": cborRawParser, +} + +var defaultProtobufParsers = FormatParsers{ + "protobuf": dagpbRawParser, + "dag-pb": dagpbRawParser, +} + +// ParseInputs uses DefaultInputEncParsers to parse io.Reader described by +// input encoding and format to an instance of ipld Node +func ParseInputs(ienc, format string, r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + return DefaultInputEncParsers.ParseInputs(ienc, format, r, mhType, mhLen) +} + +// AddParser adds DagParser under give input encoding and format +func (iep InputEncParsers) AddParser(ienc, format string, f DagParser) { + m, ok := iep[ienc] + if !ok { + m = make(FormatParsers) + iep[ienc] = m + } + + m[format] = f +} + +// ParseInputs parses io.Reader described by input encoding and format to +// an instance of ipld Node +func (iep InputEncParsers) ParseInputs(ienc, format string, r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + parsers, ok := iep[ienc] + if !ok { + return nil, fmt.Errorf("no input parser for %q", ienc) + } + + parser, ok := parsers[format] + if !ok { + return nil, fmt.Errorf("no parser for format %q using input type %q", format, ienc) + } + + return parser(r, mhType, mhLen) +} diff --git a/vendor/github.com/ipfs/go-ipfs/core/coredag/raw.go b/vendor/github.com/ipfs/go-ipfs/core/coredag/raw.go new file mode 100644 index 00000000..03bbffac --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/core/coredag/raw.go @@ -0,0 +1,37 @@ +package coredag + +import ( + "io" + "io/ioutil" + "math" + + "github.com/ipfs/go-merkledag" + + block "github.com/ipfs/go-block-format" + cid 
"github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" +) + +func rawRawParser(r io.Reader, mhType uint64, mhLen int) ([]ipld.Node, error) { + if mhType == math.MaxUint64 { + mhType = mh.SHA2_256 + } + + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + h, err := mh.Sum(data, mhType, mhLen) + if err != nil { + return nil, err + } + c := cid.NewCidV1(cid.Raw, h) + blk, err := block.NewBlockWithCid(data, c) + if err != nil { + return nil, err + } + nd := &merkledag.RawNode{Block: blk} + return []ipld.Node{nd}, nil +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/Rules.mk b/vendor/github.com/ipfs/go-ipfs/plugin/Rules.mk new file mode 100644 index 00000000..1e26d2a3 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/Rules.mk @@ -0,0 +1,9 @@ +include mk/header.mk + +dir := $(d)/loader +include $(dir)/Rules.mk + +dir := $(d)/plugins +include $(dir)/Rules.mk + +include mk/footer.mk diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/daemon.go b/vendor/github.com/ipfs/go-ipfs/plugin/daemon.go new file mode 100644 index 00000000..8ed86b76 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/daemon.go @@ -0,0 +1,14 @@ +package plugin + +import ( + coreiface "github.com/ipfs/interface-go-ipfs-core" +) + +// PluginDaemon is an interface for daemon plugins. These plugins will be run on +// the daemon and will be given access to an implementation of the CoreAPI. +type PluginDaemon interface { + Plugin + + Start(coreiface.CoreAPI) error + Close() error +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/datastore.go b/vendor/github.com/ipfs/go-ipfs/plugin/datastore.go new file mode 100644 index 00000000..735eedc9 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/datastore.go @@ -0,0 +1,14 @@ +package plugin + +import ( + "github.com/ipfs/go-ipfs/repo/fsrepo" +) + +// PluginDatastore is an interface that can be implemented to add handlers for +// for different datastores +type PluginDatastore interface { + Plugin + + DatastoreTypeName() string + DatastoreConfigParser() fsrepo.ConfigFromMap +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/ipld.go b/vendor/github.com/ipfs/go-ipfs/plugin/ipld.go new file mode 100644 index 00000000..5b45e9cd --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/ipld.go @@ -0,0 +1,16 @@ +package plugin + +import ( + "github.com/ipfs/go-ipfs/core/coredag" + + ipld "github.com/ipfs/go-ipld-format" +) + +// PluginIPLD is an interface that can be implemented to add handlers for +// for different IPLD formats +type PluginIPLD interface { + Plugin + + RegisterBlockDecoders(dec ipld.BlockDecoder) error + RegisterInputEncParsers(iec coredag.InputEncParsers) error +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/loader/Rules.mk b/vendor/github.com/ipfs/go-ipfs/plugin/loader/Rules.mk new file mode 100644 index 00000000..01f50e27 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/loader/Rules.mk @@ -0,0 +1,10 @@ +include mk/header.mk + +$(d)/preload.go: d:=$(d) +$(d)/preload.go: $(d)/preload_list $(d)/preload.sh + $(d)/preload.sh > $@ + go fmt $@ >/dev/null + +DEPS_GO += $(d)/preload.go + +include mk/footer.mk diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/loader/load_linux.go b/vendor/github.com/ipfs/go-ipfs/plugin/loader/load_linux.go new file mode 100644 index 00000000..19c1e994 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/loader/load_linux.go @@ -0,0 +1,68 @@ +// +build !noplugin + +package loader + +import ( + "errors" + "fmt" + "os" + 
"path/filepath" + "plugin" + + iplugin "github.com/ipfs/go-ipfs/plugin" +) + +func init() { + loadPluginsFunc = linuxLoadFunc +} + +func linuxLoadFunc(pluginDir string) ([]iplugin.Plugin, error) { + var plugins []iplugin.Plugin + + err := filepath.Walk(pluginDir, func(fi string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + if fi != pluginDir { + log.Warningf("found directory inside plugins directory: %s", fi) + } + return nil + } + + if info.Mode().Perm()&0111 == 0 { + // file is not executable let's not load it + // this is to prevent loading plugins from for example non-executable + // mounts, some /tmp mounts are marked as such for security + log.Errorf("non-executable file in plugins directory: %s", fi) + return nil + } + + if newPlugins, err := loadPlugin(fi); err == nil { + plugins = append(plugins, newPlugins...) + } else { + return fmt.Errorf("loading plugin %s: %s", fi, err) + } + return nil + }) + + return plugins, err +} + +func loadPlugin(fi string) ([]iplugin.Plugin, error) { + pl, err := plugin.Open(fi) + if err != nil { + return nil, err + } + pls, err := pl.Lookup("Plugins") + if err != nil { + return nil, err + } + + typePls, ok := pls.(*[]iplugin.Plugin) + if !ok { + return nil, errors.New("filed 'Plugins' didn't contain correct type") + } + + return *typePls, nil +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/loader/loader.go b/vendor/github.com/ipfs/go-ipfs/plugin/loader/loader.go new file mode 100644 index 00000000..b5a2d08e --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/loader/loader.go @@ -0,0 +1,170 @@ +package loader + +import ( + "fmt" + "os" + "strings" + + coredag "github.com/ipfs/go-ipfs/core/coredag" + plugin "github.com/ipfs/go-ipfs/plugin" + fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" + + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log" + coreiface "github.com/ipfs/interface-go-ipfs-core" + opentracing "github.com/opentracing/opentracing-go" +) + +var log = logging.Logger("plugin/loader") + +var loadPluginsFunc = func(string) ([]plugin.Plugin, error) { + return nil, nil +} + +// PluginLoader keeps track of loaded plugins +type PluginLoader struct { + plugins []plugin.Plugin +} + +// NewPluginLoader creates new plugin loader +func NewPluginLoader(pluginDir string) (*PluginLoader, error) { + plMap := make(map[string]plugin.Plugin) + for _, v := range preloadPlugins { + plMap[v.Name()] = v + } + + if pluginDir != "" { + newPls, err := loadDynamicPlugins(pluginDir) + if err != nil { + return nil, err + } + + for _, pl := range newPls { + if ppl, ok := plMap[pl.Name()]; ok { + // plugin is already preloaded + return nil, fmt.Errorf( + "plugin: %s, is duplicated in version: %s, "+ + "while trying to load dynamically: %s", + ppl.Name(), ppl.Version(), pl.Version()) + } + plMap[pl.Name()] = pl + } + } + + loader := &PluginLoader{plugins: make([]plugin.Plugin, 0, len(plMap))} + + for _, v := range plMap { + loader.plugins = append(loader.plugins, v) + } + + return loader, nil +} + +func loadDynamicPlugins(pluginDir string) ([]plugin.Plugin, error) { + _, err := os.Stat(pluginDir) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + + return loadPluginsFunc(pluginDir) +} + +// Initialize initializes all loaded plugins +func (loader *PluginLoader) Initialize() error { + for _, p := range loader.plugins { + err := p.Init() + if err != nil { + return err + } + } + + return nil +} + +// Inject hooks all the plugins into the appropriate 
subsystems. +func (loader *PluginLoader) Inject() error { + for _, pl := range loader.plugins { + if pl, ok := pl.(plugin.PluginIPLD); ok { + err := injectIPLDPlugin(pl) + if err != nil { + return err + } + } + if pl, ok := pl.(plugin.PluginTracer); ok { + err := injectTracerPlugin(pl) + if err != nil { + return err + } + } + if pl, ok := pl.(plugin.PluginDatastore); ok { + err := injectDatastorePlugin(pl) + if err != nil { + return err + } + } + } + return nil +} + +// Start starts all long-running plugins. +func (loader *PluginLoader) Start(iface coreiface.CoreAPI) error { + for i, pl := range loader.plugins { + if pl, ok := pl.(plugin.PluginDaemon); ok { + err := pl.Start(iface) + if err != nil { + _ = closePlugins(loader.plugins[i:]) + return err + } + } + } + return nil +} + +// StopDaemon stops all long-running plugins. +func (loader *PluginLoader) Close() error { + return closePlugins(loader.plugins) +} + +func closePlugins(plugins []plugin.Plugin) error { + var errs []string + for _, pl := range plugins { + if pl, ok := pl.(plugin.PluginDaemon); ok { + err := pl.Close() + if err != nil { + errs = append(errs, fmt.Sprintf( + "error closing plugin %s: %s", + pl.Name(), + err.Error(), + )) + } + } + } + if errs != nil { + return fmt.Errorf(strings.Join(errs, "\n")) + } + return nil +} + +func injectDatastorePlugin(pl plugin.PluginDatastore) error { + return fsrepo.AddDatastoreConfigHandler(pl.DatastoreTypeName(), pl.DatastoreConfigParser()) +} + +func injectIPLDPlugin(pl plugin.PluginIPLD) error { + err := pl.RegisterBlockDecoders(ipld.DefaultBlockDecoder) + if err != nil { + return err + } + return pl.RegisterInputEncParsers(coredag.DefaultInputEncParsers) +} + +func injectTracerPlugin(pl plugin.PluginTracer) error { + tracer, err := pl.InitTracer() + if err != nil { + return err + } + opentracing.SetGlobalTracer(tracer) + return nil +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/loader/preload.go b/vendor/github.com/ipfs/go-ipfs/plugin/loader/preload.go new file mode 100644 index 00000000..730f3538 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/loader/preload.go @@ -0,0 +1,20 @@ +package loader + +import ( + "github.com/ipfs/go-ipfs/plugin" + pluginbadgerds "github.com/ipfs/go-ipfs/plugin/plugins/badgerds" + pluginflatfs "github.com/ipfs/go-ipfs/plugin/plugins/flatfs" + pluginipldgit "github.com/ipfs/go-ipfs/plugin/plugins/git" + pluginlevelds "github.com/ipfs/go-ipfs/plugin/plugins/levelds" +) + +// DO NOT EDIT THIS FILE +// This file is being generated as part of plugin build process +// To change it, modify the plugin/loader/preload.sh + +var preloadPlugins = []plugin.Plugin{ + pluginipldgit.Plugins[0], + pluginbadgerds.Plugins[0], + pluginflatfs.Plugins[0], + pluginlevelds.Plugins[0], +} diff --git a/vendor/github.com/ipfs/go-ipfs/plugin/loader/preload.sh b/vendor/github.com/ipfs/go-ipfs/plugin/loader/preload.sh new file mode 100755 index 00000000..5d000d0a --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs/plugin/loader/preload.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +to_preload() { + awk 'NF' "$DIR/preload_list" | sed '/^#/d' +} + +cat < An ipld codec for git objects allowing path traversals across the git graph! + +Note: This is WIP and may not be an entirely correct parser. 
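A minimal sketch of decoding a loose git object with this codec, assuming a zlib-compressed loose object under `.git/objects/` (the path shown is a placeholder); it relies on `ParseCompressedObject` from git.go further down in this diff, and prints the resulting git-raw CID:

```go
package main

import (
	"fmt"
	"os"

	ipldgit "github.com/ipfs/go-ipld-git"
)

func main() {
	// Hypothetical loose-object path; git stores these zlib-compressed.
	f, err := os.Open(".git/objects/ab/cdef0123456789")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// ParseCompressedObject inflates the stream and dispatches on the
	// object type (blob/tree/commit/tag), returning an ipld.Node.
	nd, err := ipldgit.ParseCompressedObject(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(nd.Cid()) // CIDv1 with the git-raw codec and a SHA-1 multihash
}
```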
+ +## Table of Contents + +- [Install](#install) +- [About](#about) +- [Contribute](#contribute) +- [License](#license) + +## Install + +```sh +go get github.com/ipfs/go-ipld-git +``` + +## About +This is an IPLD codec which handles git objects. Objects are transformed +into IPLD graph in the following way: + +* Commit: +```json +{ + "author": { + "date": "1503667703 +0200", + "email": "author@mail", + "name": "Author Name" + }, + "committer": { + "date": "1503667703 +0200", + "email": "author@mail", + "name": "Author Name" + }, + "message": "Commit Message\n", + "parents": [ + , , ... + ], + "tree": +} + +``` + +* Tag: +```json +{ + "message": "message\n", + "object": { + "/": "baf4bcfg3mbz3yj3njqyr3ifdaqyfv3prei6h6bq" + }, + "tag": "tagname", + "tagger": { + "date": "1503667703 +0200", + "email": "author@mail", + "name": "Author Name" + }, + "type": "commit" +} + +``` + +* Tree: +```json +{ + "file.name": { + "mode": "100664", + "hash": + }, + "directoryname": { + "mode": "40000", + "hash": + }, + ... +} +``` + + +* Blob: +```json +"\0'>" +``` +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Jeromy Johnson diff --git a/vendor/github.com/ipfs/go-ipld-git/blob.go b/vendor/github.com/ipfs/go-ipld-git/blob.go new file mode 100644 index 00000000..8a67b981 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/blob.go @@ -0,0 +1,71 @@ +package ipldgit + +import ( + "encoding/json" + "errors" + + cid "github.com/ipfs/go-cid" + node "github.com/ipfs/go-ipld-format" +) + +type Blob struct { + rawData []byte + cid cid.Cid +} + +func (b *Blob) Cid() cid.Cid { + return b.cid +} + +func (b *Blob) Copy() node.Node { + nb := *b + return &nb +} + +func (b *Blob) Links() []*node.Link { + return nil +} + +func (b *Blob) Resolve(_ []string) (interface{}, []string, error) { + return nil, nil, errors.New("no such link") +} + +func (b *Blob) ResolveLink(_ []string) (*node.Link, []string, error) { + return nil, nil, errors.New("no such link") +} + +func (b *Blob) Loggable() map[string]interface{} { + return map[string]interface{}{ + "type": "git_blob", + } +} + +func (b *Blob) MarshalJSON() ([]byte, error) { + return json.Marshal(b.rawData) +} + +func (b *Blob) RawData() []byte { + return []byte(b.rawData) +} + +func (b *Blob) Size() (uint64, error) { + return uint64(len(b.rawData)), nil +} + +func (b *Blob) Stat() (*node.NodeStat, error) { + return &node.NodeStat{}, nil +} + +func (b *Blob) String() string { + return "[git blob]" +} + +func (b *Blob) Tree(p string, depth int) []string { + return nil +} + +func (b *Blob) GitSha() []byte { + return cidToSha(b.Cid()) +} + +var _ node.Node = (*Blob)(nil) diff --git a/vendor/github.com/ipfs/go-ipld-git/codecov.yml b/vendor/github.com/ipfs/go-ipld-git/codecov.yml new file mode 100644 index 00000000..5f88a9ea --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-ipld-git/commit.go b/vendor/github.com/ipfs/go-ipld-git/commit.go new file mode 100644 index 00000000..00b81e32 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/commit.go @@ -0,0 +1,286 @@ +package ipldgit + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strconv" + "sync" + + cid "github.com/ipfs/go-cid" + node "github.com/ipfs/go-ipld-format" +) + +type Commit struct { + DataSize string `json:"-"` + GitTree 
cid.Cid `json:"tree"` + Parents []cid.Cid `json:"parents"` + Message string `json:"message"` + Author *PersonInfo `json:"author"` + Committer *PersonInfo `json:"committer"` + Encoding string `json:"encoding,omitempty"` + Sig *GpgSig `json:"signature,omitempty"` + MergeTag []*MergeTag `json:"mergetag,omitempty"` + + // Other contains all the non-standard headers, such as 'HG:extra' + Other []string `json:"other,omitempty"` + + cid cid.Cid + + rawData []byte + rawDataOnce sync.Once +} + +type PersonInfo struct { + Name string + Email string + Date string + Timezone string +} + +func (pi *PersonInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{ + "name": pi.Name, + "email": pi.Email, + "date": pi.Date + " " + pi.Timezone, + }) +} + +func (pi *PersonInfo) String() string { + f := "%s <%s>" + arg := []interface{}{pi.Name, pi.Email} + if pi.Date != "" { + f = f + " %s" + arg = append(arg, pi.Date) + } + + if pi.Timezone != "" { + f = f + " %s" + arg = append(arg, pi.Timezone) + } + return fmt.Sprintf(f, arg...) +} + +func (pi *PersonInfo) tree(name string, depth int) []string { + if depth == 1 { + return []string{name} + } + return []string{name + "/name", name + "/email", name + "/date"} +} + +func (pi *PersonInfo) resolve(p []string) (interface{}, []string, error) { + switch p[0] { + case "name": + return pi.Name, p[1:], nil + case "email": + return pi.Email, p[1:], nil + case "date": + return pi.Date + " " + pi.Timezone, p[1:], nil + default: + return nil, nil, errors.New("no such link") + } +} + +type MergeTag struct { + Object cid.Cid `json:"object"` + Type string `json:"type"` + Tag string `json:"tag"` + Tagger *PersonInfo `json:"tagger"` + Text string `json:"text"` +} + +type GpgSig struct { + Text string +} + +func (c *Commit) Cid() cid.Cid { + return c.cid +} + +func (c *Commit) Copy() node.Node { + nc := *c + return &nc +} + +func (c *Commit) Links() []*node.Link { + out := []*node.Link{ + {Cid: c.GitTree}, + } + + for _, p := range c.Parents { + out = append(out, &node.Link{Cid: p}) + } + return out +} + +func (c *Commit) Loggable() map[string]interface{} { + return map[string]interface{}{ + "type": "git_commit", + } +} + +func (c *Commit) RawData() []byte { + c.rawDataOnce.Do(func() { + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "commit %s\x00", c.DataSize) + fmt.Fprintf(buf, "tree %s\n", hex.EncodeToString(cidToSha(c.GitTree))) + for _, p := range c.Parents { + fmt.Fprintf(buf, "parent %s\n", hex.EncodeToString(cidToSha(p))) + } + fmt.Fprintf(buf, "author %s\n", c.Author.String()) + fmt.Fprintf(buf, "committer %s\n", c.Committer.String()) + if len(c.Encoding) > 0 { + fmt.Fprintf(buf, "encoding %s\n", c.Encoding) + } + for _, mtag := range c.MergeTag { + fmt.Fprintf(buf, "mergetag object %s\n", hex.EncodeToString(cidToSha(mtag.Object))) + fmt.Fprintf(buf, " type %s\n", mtag.Type) + fmt.Fprintf(buf, " tag %s\n", mtag.Tag) + fmt.Fprintf(buf, " tagger %s\n \n", mtag.Tagger.String()) + fmt.Fprintf(buf, "%s", mtag.Text) + } + if c.Sig != nil { + fmt.Fprintln(buf, "gpgsig -----BEGIN PGP SIGNATURE-----") + fmt.Fprint(buf, c.Sig.Text) + fmt.Fprintln(buf, " -----END PGP SIGNATURE-----") + } + for _, line := range c.Other { + fmt.Fprintln(buf, line) + } + fmt.Fprintf(buf, "\n%s", c.Message) + c.rawData = buf.Bytes() + }) + + return c.rawData +} + +func (c *Commit) Resolve(path []string) (interface{}, []string, error) { + if len(path) == 0 { + return nil, nil, fmt.Errorf("zero length path") + } + + switch path[0] { + case "parents": + if len(path) == 1 { + return 
c.Parents, nil, nil + } + + i, err := strconv.Atoi(path[1]) + if err != nil { + return nil, nil, err + } + + if i < 0 || i >= len(c.Parents) { + return nil, nil, fmt.Errorf("index out of range") + } + + return &node.Link{Cid: c.Parents[i]}, path[2:], nil + case "author": + if len(path) == 1 { + return c.Author, nil, nil + } + return c.Author.resolve(path[1:]) + case "committer": + if len(path) == 1 { + return c.Committer, nil, nil + } + return c.Committer.resolve(path[1:]) + case "signature": + return c.Sig.Text, path[1:], nil + case "message": + return c.Message, path[1:], nil + case "tree": + return &node.Link{Cid: c.GitTree}, path[1:], nil + case "mergetag": + if len(path) == 1 { + return c.MergeTag, nil, nil + } + + i, err := strconv.Atoi(path[1]) + if err != nil { + return nil, nil, err + } + + if i < 0 || i >= len(c.MergeTag) { + return nil, nil, fmt.Errorf("index out of range") + } + + if len(path) == 2 { + return c.MergeTag[i], nil, nil + } + return c.MergeTag[i].resolve(path[2:]) + default: + return nil, nil, errors.New("no such link") + } +} + +func (c *Commit) ResolveLink(path []string) (*node.Link, []string, error) { + out, rest, err := c.Resolve(path) + if err != nil { + return nil, nil, err + } + + lnk, ok := out.(*node.Link) + if !ok { + return nil, nil, errors.New("not a link") + } + + return lnk, rest, nil +} + +func (c *Commit) Size() (uint64, error) { + return uint64(len(c.RawData())), nil +} + +func (c *Commit) Stat() (*node.NodeStat, error) { + return &node.NodeStat{}, nil +} + +func (c *Commit) String() string { + return "[git commit object]" +} + +func (c *Commit) Tree(p string, depth int) []string { + if depth != -1 { + panic("proper tree not yet implemented") + } + tree := []string{"tree", "parents", "message", "gpgsig"} + tree = append(tree, c.Author.tree("author", depth)...) + tree = append(tree, c.Committer.tree("committer", depth)...) 
+ for i := range c.Parents { + tree = append(tree, fmt.Sprintf("parents/%d", i)) + } + return tree +} + +func (c *Commit) GitSha() []byte { + return cidToSha(c.Cid()) +} + +func (t *MergeTag) resolve(path []string) (interface{}, []string, error) { + if len(path) == 0 { + return nil, nil, fmt.Errorf("zero length path") + } + + switch path[0] { + case "object": + return &node.Link{Cid: t.Object}, path[1:], nil + case "tag": + return t.Tag, path[1:], nil + case "tagger": + if len(path) == 1 { + return t.Tagger, nil, nil + } + return t.Tagger.resolve(path[1:]) + case "text": + return t.Text, path[1:], nil + case "type": + return t.Type, path[1:], nil + default: + return nil, nil, errors.New("no such link") + } +} + +var _ node.Node = (*Commit)(nil) diff --git a/vendor/github.com/ipfs/go-ipld-git/git.go b/vendor/github.com/ipfs/go-ipld-git/git.go new file mode 100644 index 00000000..8a443e84 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/git.go @@ -0,0 +1,478 @@ +package ipldgit + +import ( + "bufio" + "bytes" + "compress/zlib" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + node "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" +) + +func DecodeBlock(block blocks.Block) (node.Node, error) { + prefix := block.Cid().Prefix() + + if prefix.Codec != cid.GitRaw || prefix.MhType != mh.SHA1 || prefix.MhLength != mh.DefaultLengths[mh.SHA1] { + return nil, errors.New("invalid CID prefix") + } + + return ParseObjectFromBuffer(block.RawData()) +} + +var _ node.DecodeBlockFunc = DecodeBlock + +func ParseObjectFromBuffer(b []byte) (node.Node, error) { + return ParseObject(bytes.NewReader(b)) +} + +func ParseCompressedObject(r io.Reader) (node.Node, error) { + rc, err := zlib.NewReader(r) + if err != nil { + return nil, err + } + defer rc.Close() + + return ParseObject(rc) +} + +func ParseObject(r io.Reader) (node.Node, error) { + rd := bufio.NewReader(r) + + typ, err := rd.ReadString(' ') + if err != nil { + return nil, err + } + typ = typ[:len(typ)-1] + + switch typ { + case "tree": + return ReadTree(rd) + case "commit": + return ReadCommit(rd) + case "blob": + return ReadBlob(rd) + case "tag": + return ReadTag(rd) + default: + return nil, fmt.Errorf("unrecognized object type: %s", typ) + } +} + +func ReadBlob(rd *bufio.Reader) (*Blob, error) { + size, err := rd.ReadString(0) + if err != nil { + return nil, err + } + + sizen, err := strconv.Atoi(size[:len(size)-1]) + if err != nil { + return nil, err + } + + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "blob %d\x00", sizen) + + n, err := io.Copy(buf, rd) + if err != nil { + return nil, err + } + + if n != int64(sizen) { + return nil, fmt.Errorf("blob size was not accurate") + } + + out := &Blob{} + out.rawData = buf.Bytes() + out.cid = hashObject(out.RawData()) + + return out, nil +} + +func ReadCommit(rd *bufio.Reader) (*Commit, error) { + size, err := rd.ReadString(0) + if err != nil { + return nil, err + } + + out := &Commit{ + DataSize: size[:len(size)-1], + } + + for { + line, _, err := rd.ReadLine() + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + + err = parseCommitLine(out, line, rd) + if err != nil { + return nil, err + } + } + + out.cid = hashObject(out.RawData()) + + return out, nil +} + +func parseCommitLine(out *Commit, line []byte, rd *bufio.Reader) error { + switch { + case bytes.HasPrefix(line, []byte("tree ")): + sha, err := hex.DecodeString(string(line[5:])) + if err != 
nil { + return err + } + + out.GitTree = shaToCid(sha) + case bytes.HasPrefix(line, []byte("parent ")): + psha, err := hex.DecodeString(string(line[7:])) + if err != nil { + return err + } + + out.Parents = append(out.Parents, shaToCid(psha)) + case bytes.HasPrefix(line, []byte("author ")): + a, err := parsePersonInfo(line) + if err != nil { + return err + } + + out.Author = a + case bytes.HasPrefix(line, []byte("committer ")): + c, err := parsePersonInfo(line) + if err != nil { + return err + } + + out.Committer = c + case bytes.HasPrefix(line, []byte("encoding ")): + out.Encoding = string(line[9:]) + case bytes.HasPrefix(line, []byte("mergetag object ")): + sha, err := hex.DecodeString(string(line)[16:]) + if err != nil { + return err + } + + mt, rest, err := ReadMergeTag(sha, rd) + if err != nil { + return err + } + + out.MergeTag = append(out.MergeTag, mt) + + if rest != nil { + err = parseCommitLine(out, rest, rd) + if err != nil { + return err + } + } + case bytes.HasPrefix(line, []byte("gpgsig ")): + sig, err := ReadGpgSig(rd) + if err != nil { + return err + } + out.Sig = sig + case len(line) == 0: + rest, err := ioutil.ReadAll(rd) + if err != nil { + return err + } + + out.Message = string(rest) + default: + out.Other = append(out.Other, string(line)) + } + return nil +} + +func ReadTag(rd *bufio.Reader) (*Tag, error) { + size, err := rd.ReadString(0) + if err != nil { + return nil, err + } + + out := &Tag{ + dataSize: size[:len(size)-1], + } + + for { + line, _, err := rd.ReadLine() + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + + switch { + case bytes.HasPrefix(line, []byte("object ")): + sha, err := hex.DecodeString(string(line[7:])) + if err != nil { + return nil, err + } + + out.Object = shaToCid(sha) + case bytes.HasPrefix(line, []byte("tag ")): + out.Tag = string(line[4:]) + case bytes.HasPrefix(line, []byte("tagger ")): + c, err := parsePersonInfo(line) + if err != nil { + return nil, err + } + + out.Tagger = c + case bytes.HasPrefix(line, []byte("type ")): + out.Type = string(line[5:]) + case len(line) == 0: + rest, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + out.Message = string(rest) + default: + fmt.Println("unhandled line: ", string(line)) + } + } + + out.cid = hashObject(out.RawData()) + + return out, nil +} + +func hashObject(data []byte) cid.Cid { + c, err := cid.Prefix{ + MhType: mh.SHA1, + MhLength: -1, + Codec: cid.GitRaw, + Version: 1, + }.Sum(data) + if err != nil { + panic(err) + } + return c +} + +func ReadMergeTag(hash []byte, rd *bufio.Reader) (*MergeTag, []byte, error) { + out := new(MergeTag) + + out.Object = shaToCid(hash) + for { + line, _, err := rd.ReadLine() + if err != nil { + if err == io.EOF { + break + } + return nil, nil, err + } + + switch { + case bytes.HasPrefix(line, []byte(" type ")): + out.Type = string(line[6:]) + case bytes.HasPrefix(line, []byte(" tag ")): + out.Tag = string(line[5:]) + case bytes.HasPrefix(line, []byte(" tagger ")): + tagger, err := parsePersonInfo(line[1:]) + if err != nil { + return nil, nil, err + } + out.Tagger = tagger + case string(line) == " ": + for { + line, _, err := rd.ReadLine() + if err != nil { + return nil, nil, err + } + + if !bytes.HasPrefix(line, []byte(" ")) { + return out, line, nil + } + + out.Text += string(line) + "\n" + } + } + } + return out, nil, nil +} + +func ReadGpgSig(rd *bufio.Reader) (*GpgSig, error) { + line, _, err := rd.ReadLine() + if err != nil { + return nil, err + } + + out := new(GpgSig) + + if string(line) != " " { + if 
strings.HasPrefix(string(line), " Version: ") || strings.HasPrefix(string(line), " Comment: ") { + out.Text += string(line) + "\n" + } else { + return nil, fmt.Errorf("expected first line of sig to be a single space or version") + } + } else { + out.Text += " \n" + } + + for { + line, _, err := rd.ReadLine() + if err != nil { + return nil, err + } + + if bytes.Equal(line, []byte(" -----END PGP SIGNATURE-----")) { + break + } + + out.Text += string(line) + "\n" + } + + return out, nil +} + +func parsePersonInfo(line []byte) (*PersonInfo, error) { + parts := bytes.Split(line, []byte{' '}) + if len(parts) < 3 { + fmt.Println(string(line)) + return nil, fmt.Errorf("incorrectly formatted person info line") + } + + //TODO: just use regex? + //skip prefix + at := 1 + + var pi PersonInfo + var name string + + for { + if at == len(parts) { + return nil, fmt.Errorf("invalid personInfo: %s\n", line) + } + part := parts[at] + if len(part) != 0 { + if part[0] == '<' { + break + } + name += string(part) + " " + } else if len(name) > 0 { + name += " " + } + at++ + } + if len(name) != 0 { + pi.Name = name[:len(name)-1] + } + + var email string + for { + if at == len(parts) { + return nil, fmt.Errorf("invalid personInfo: %s\n", line) + } + part := parts[at] + if part[0] == '<' { + part = part[1:] + } + + at++ + if part[len(part)-1] == '>' { + email += string(part[:len(part)-1]) + break + } + email += string(part) + " " + } + pi.Email = email + + if at == len(parts) { + return &pi, nil + } + pi.Date = string(parts[at]) + + at++ + if at == len(parts) { + return &pi, nil + } + pi.Timezone = string(parts[at]) + return &pi, nil +} + +func ReadTree(rd *bufio.Reader) (*Tree, error) { + lstr, err := rd.ReadString(0) + if err != nil { + return nil, err + } + lstr = lstr[:len(lstr)-1] + + n, err := strconv.Atoi(lstr) + if err != nil { + return nil, err + } + + t := &Tree{ + entries: make(map[string]*TreeEntry), + size: n, + } + var order []string + for { + e, err := ReadEntry(rd) + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + + order = append(order, e.name) + t.entries[e.name] = e + } + t.order = order + t.cid = hashObject(t.RawData()) + + return t, nil +} + +func cidToSha(c cid.Cid) []byte { + h := c.Hash() + return h[len(h)-20:] +} + +func shaToCid(sha []byte) cid.Cid { + h, _ := mh.Encode(sha, mh.SHA1) + return cid.NewCidV1(cid.GitRaw, h) +} + +func ReadEntry(r *bufio.Reader) (*TreeEntry, error) { + data, err := r.ReadString(' ') + if err != nil { + return nil, err + } + data = data[:len(data)-1] + + name, err := r.ReadString(0) + if err != nil { + return nil, err + } + name = name[:len(name)-1] + + sha := make([]byte, 20) + _, err = io.ReadFull(r, sha) + if err != nil { + return nil, err + } + + return &TreeEntry{ + name: name, + Mode: data, + Hash: shaToCid(sha), + }, nil +} diff --git a/vendor/github.com/ipfs/go-ipld-git/go.mod b/vendor/github.com/ipfs/go-ipld-git/go.mod new file mode 100644 index 00000000..433bb4e8 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/go.mod @@ -0,0 +1,8 @@ +module github.com/ipfs/go-ipld-git + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.2 + github.com/ipfs/go-ipld-format v0.0.1 + github.com/multiformats/go-multihash v0.0.1 +) diff --git a/vendor/github.com/ipfs/go-ipld-git/go.sum b/vendor/github.com/ipfs/go-ipld-git/go.sum new file mode 100644 index 00000000..b524adb8 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/go.sum @@ -0,0 +1,30 @@ +github.com/gxed/hashland/keccakpg v0.0.1 
h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2 h1:tuuKaZPU1M6HcejsO3AcYWW8sZ8MTvyxfc4uqB4eFE8= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/ipfs/go-ipld-git/make-test-repo.sh b/vendor/github.com/ipfs/go-ipld-git/make-test-repo.sh new file mode 100755 index 00000000..dbb3f230 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/make-test-repo.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash + +set -x +CUR_DIR=$(pwd) +TEST_DIR=$(mktemp -d) +cd ${TEST_DIR} + +git init + +# Test generic commit/blob + +git config user.name "John Doe" +git config user.email johndoe@example.com + +echo "Hello world" > file +git add file + +git commit -m "Init" + +# Test generic commit/tree/blob, weird person info + +mkdir dir +mkdir dir/subdir +mkdir dir2 + +echo "qwerty" > dir/f1 +echo "123456" > dir/subdir/f2 +echo "',.pyf" > dir2/f3 + +git 
add . + +git config user.name "John Doe & John Other" +git config user.email "johndoe@example.com, johnother@example.com" +git commit -m "Commit 2" + +# Test merge-tag +git config user.name "John Doe" +git config user.email johndoe@example.com + +git branch dev +git checkout dev + +echo ";qjkxb" > dir/f4 + +git add dir/f4 +git commit -m "Release" +git tag -a v1 -m "Some version" +git checkout master + +## defer eyes.Open() +## eyes.Close() + +git cat-file tag $(cat .git/refs/tags/v1) | head -n4 | sed 's/v1/v1sig/g' > sigobj +cat >>sigobj < sigtag +FILE=.git/objects/$(sha1sum sigtag | cut -d' ' -f1 | sed 's/../\0\//') +mkdir -p $(dirname ${FILE}) +cat sigtag | zlib-flate -compress > ${FILE} +echo $(sha1sum sigtag | cut -d' ' -f1) > .git/refs/tags/v1sig + +git merge v1sig --no-ff -m "Merge tag v1" + +# Test encoding +git config i18n.commitencoding "ISO-8859-1" +echo "fgcrl" > f6 +git add f6 +git commit -m "Encoded" + +# Test iplBlob/tree tags +git tag -a v1-file -m "Some file" 933b7583b7767b07ea4cf242c1be29162eb8bb85 +git tag -a v1-tree -m "Some tree" 672ef117424f54b71e5e058d1184de6a07450d0e + +# Create test 'signed' objects + +git cat-file commit $(cat .git/refs/heads/master) | head -n4 > sigobj +echo "gpgsig -----BEGIN PGP SIGNATURE-----" >> sigobj +echo " " >> sigobj +echo " NotReallyABase64Signature" >> sigobj +echo " ButItsGoodEnough" >> sigobj +echo " -----END PGP SIGNATURE-----" >> sigobj +echo "" >> sigobj +echo "Encoded" >> sigobj + +cat <(printf "commit %d\0" $(wc -c sigobj | cut -d' ' -f1); cat sigobj) > sigcommit +FILE=.git/objects/$(sha1sum sigcommit | cut -d' ' -f1 | sed 's/../\0\//') +mkdir -p $(dirname ${FILE}) +cat sigcommit | zlib-flate -compress > ${FILE} + +git cat-file commit $(cat .git/refs/heads/master) | head -n4 > sigobj +echo "gpgsig -----BEGIN PGP SIGNATURE-----" >> sigobj +echo " Version: 0.1.2" >> sigobj +echo " " >> sigobj +echo " NotReallyABase64Signature" >> sigobj +echo " ButItsGoodEnough" >> sigobj +echo " -----END PGP SIGNATURE-----" >> sigobj +echo " " >> sigobj +echo "" >> sigobj +echo "Encoded" >> sigobj + +cat <(printf "commit %d\0" $(wc -c sigobj | cut -d' ' -f1); cat sigobj) > sigcommit +FILE=.git/objects/$(sha1sum sigcommit | cut -d' ' -f1 | sed 's/../\0\//') +mkdir -p $(dirname ${FILE}) +cat sigcommit | zlib-flate -compress >> ${FILE} +rm sigobj sigcommit + +# Create test archive, clean up + +tar czf git.tar.gz .git +mv git.tar.gz ${CUR_DIR}/testdata.tar.gz +cd ${CUR_DIR} +rm -rf ${TEST_DIR} diff --git a/vendor/github.com/ipfs/go-ipld-git/package.json b/vendor/github.com/ipfs/go-ipld-git/package.json new file mode 100644 index 00000000..cff29bcf --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/package.json @@ -0,0 +1,42 @@ +{ + "author": "whyrusleeping", + "bugs": { + "url": "https://github.com/ipfs/go-ipld-git" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-ipld-git" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmZ6nzCLwGLVfRzYLpD7pW6UNuBDKEcA2imJtVpbEx2rxy", + "name": "go-ipld-format", + "version": "0.8.1" + }, + { + "author": "whyrusleeping", + "hash": "QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN", + "name": "go-cid", + "version": "0.9.3" + }, + { + "author": "stebalien", + "hash": "QmYYLnAzR28nAQ4U5MFniLprnktu6eTFKibeNt96V21EZK", + "name": "go-block-format", + "version": "0.2.2" + }, + { + "author": "multiformats", + "hash": "QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW", + "name": "go-multihash", + "version": "1.0.9" + } + ], + "gxVersion": "0.10.0", + "language": "go", + "license": "", + "name": 
"go-ipld-git", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "0.3.6" +} + diff --git a/vendor/github.com/ipfs/go-ipld-git/tag.go b/vendor/github.com/ipfs/go-ipld-git/tag.go new file mode 100644 index 00000000..25245ac9 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/tag.go @@ -0,0 +1,136 @@ +package ipldgit + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "sync" + + cid "github.com/ipfs/go-cid" + node "github.com/ipfs/go-ipld-format" +) + +type Tag struct { + Object cid.Cid `json:"object"` + Type string `json:"type"` + Tag string `json:"tag"` + Tagger *PersonInfo `json:"tagger"` + Message string `json:"message"` + dataSize string + + cid cid.Cid + + rawData []byte + rawDataOnce sync.Once +} + +func (t *Tag) Cid() cid.Cid { + return t.cid +} + +func (t *Tag) Copy() node.Node { + nt := *t + return &nt +} + +func (t *Tag) Links() []*node.Link { + return []*node.Link{{Cid: t.Object}} +} + +func (t *Tag) Loggable() map[string]interface{} { + return map[string]interface{}{ + "type": "git_tag", + } +} + +func (t *Tag) RawData() []byte { + t.rawDataOnce.Do(func() { + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "tag %s\x00", t.dataSize) + fmt.Fprintf(buf, "object %s\n", hex.EncodeToString(cidToSha(t.Object))) + fmt.Fprintf(buf, "type %s\n", t.Type) + fmt.Fprintf(buf, "tag %s\n", t.Tag) + if t.Tagger != nil { + fmt.Fprintf(buf, "tagger %s\n", t.Tagger.String()) + } + if t.Message != "" { + fmt.Fprintf(buf, "\n%s", t.Message) + } + t.rawData = buf.Bytes() + }) + + return t.rawData +} + +func (t *Tag) Resolve(path []string) (interface{}, []string, error) { + if len(path) == 0 { + return nil, nil, fmt.Errorf("zero length path") + } + + switch path[0] { + case "object": + return &node.Link{Cid: t.Object}, path[1:], nil + case "type": + return t.Type, path[1:], nil + case "tagger": + if len(path) == 1 { + return t.Tagger, nil, nil + } + return t.Tagger.resolve(path[1:]) + case "message": + return t.Message, path[1:], nil + case "tag": + return t.Tag, path[1:], nil + default: + return nil, nil, errors.New("no such link") + } +} + +func (t *Tag) ResolveLink(path []string) (*node.Link, []string, error) { + out, rest, err := t.Resolve(path) + if err != nil { + return nil, nil, err + } + + lnk, ok := out.(*node.Link) + if !ok { + return nil, nil, errors.New("not a link") + } + + return lnk, rest, nil +} + +func (t *Tag) Size() (uint64, error) { + return uint64(len(t.RawData())), nil +} + +func (t *Tag) Stat() (*node.NodeStat, error) { + return &node.NodeStat{}, nil +} + +func (t *Tag) String() string { + return "[git tag object]" +} + +func (t *Tag) Tree(p string, depth int) []string { + if p != "" { + if p == "tagger" { + return []string{"name", "email", "date"} + } + return nil + } + if depth == 0 { + return nil + } + + tree := []string{"object", "type", "tag", "message"} + tree = append(tree, t.Tagger.tree("tagger", depth)...) 
+ return tree +} + +func (t *Tag) GitSha() []byte { + return cidToSha(t.Cid()) +} + +var _ node.Node = (*Tag)(nil) diff --git a/vendor/github.com/ipfs/go-ipld-git/testdata.tar.gz b/vendor/github.com/ipfs/go-ipld-git/testdata.tar.gz new file mode 100644 index 00000000..658dfe84 Binary files /dev/null and b/vendor/github.com/ipfs/go-ipld-git/testdata.tar.gz differ diff --git a/vendor/github.com/ipfs/go-ipld-git/tree.go b/vendor/github.com/ipfs/go-ipld-git/tree.go new file mode 100644 index 00000000..5ce354ac --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-git/tree.go @@ -0,0 +1,170 @@ +package ipldgit + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "sync" + + cid "github.com/ipfs/go-cid" + node "github.com/ipfs/go-ipld-format" +) + +type Tree struct { + entries map[string]*TreeEntry + size int + order []string + cid cid.Cid + rawData []byte + rawDataOnce sync.Once +} + +type TreeEntry struct { + name string + Mode string `json:"mode"` + Hash cid.Cid `json:"hash"` +} + +func (t *Tree) Cid() cid.Cid { + return t.cid +} + +func (t *Tree) String() string { + return "[git tree object]" +} + +func (t *Tree) GitSha() []byte { + return cidToSha(t.cid) +} + +func (t *Tree) Copy() node.Node { + out := &Tree{ + entries: make(map[string]*TreeEntry), + cid: t.cid, + size: t.size, + order: t.order, // TODO: make a deep copy of this + } + + for k, v := range t.entries { + nv := *v + out.entries[k] = &nv + } + return out +} + +func (t *Tree) MarshalJSON() ([]byte, error) { + return json.Marshal(t.entries) +} + +func (t *Tree) Tree(p string, depth int) []string { + if p != "" { + _, ok := t.entries[p] + if !ok { + return nil + } + + return []string{"mode", "type", "hash"} + } + + if depth == 0 { + return nil + } + + if depth == 1 { + return t.order + } + + var out []string + for k, _ := range t.entries { + out = append(out, k, k+"/mode", k+"/type", k+"/hash") + } + return out +} + +func (t *Tree) Links() []*node.Link { + var out []*node.Link + for _, v := range t.entries { + out = append(out, &node.Link{Cid: v.Hash}) + } + return out +} + +func (t *Tree) Loggable() map[string]interface{} { + return map[string]interface{}{ + "type": "git tree object", + } +} + +func (t *Tree) RawData() []byte { + t.rawDataOnce.Do(func() { + buf := new(bytes.Buffer) + + fmt.Fprintf(buf, "tree %d\x00", t.size) + for _, s := range t.order { + t.entries[s].WriteTo(buf) + } + t.rawData = buf.Bytes() + }) + + return t.rawData +} + +func (t *Tree) Resolve(p []string) (interface{}, []string, error) { + e, ok := t.entries[p[0]] + if !ok { + return nil, nil, errors.New("no such link") + } + + if len(p) == 1 { + return e, nil, nil + } + + switch p[1] { + case "hash": + return &node.Link{Cid: e.Hash}, p[2:], nil + case "mode": + return e.Mode, p[2:], nil + default: + return nil, nil, errors.New("no such link") + } +} + +func (t Tree) ResolveLink(path []string) (*node.Link, []string, error) { + out, rest, err := t.Resolve(path) + if err != nil { + return nil, nil, err + } + + lnk, ok := out.(*node.Link) + if !ok { + return nil, nil, errors.New("not a link") + } + + return lnk, rest, nil +} + +func (t *Tree) Size() (uint64, error) { + return uint64(len(t.RawData())), nil +} + +func (t *Tree) Stat() (*node.NodeStat, error) { + return &node.NodeStat{}, nil +} + +func (te *TreeEntry) WriteTo(w io.Writer) (int, error) { + n, err := fmt.Fprintf(w, "%s %s\x00", te.Mode, te.name) + if err != nil { + return 0, err + } + + nn, err := w.Write(cidToSha(te.Hash)) + if err != nil { + return n, err + } + + return n + nn, nil +} + 
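// ---------------------------------------------------------------------------
// Editor's note: the snippet below is an illustrative usage sketch, not part
// of the vendored go-ipld-git files added by this diff. It exercises only
// functions visible above (ParseObjectFromBuffer and the node.Node Cid/Resolve
// methods); the file name, mode, and all-zero 20-byte SHA are made up for the
// example.
package main

import (
	"fmt"

	ipldgit "github.com/ipfs/go-ipld-git"
)

func main() {
	// A hand-built loose "tree" object: the header "tree <size>\x00" followed
	// by one entry of the form "<mode> <name>\x00<20-byte sha>". The entry is
	// 32 bytes ("100644 file\x00" is 12 bytes plus the 20-byte hash).
	entry := append([]byte("100644 file\x00"), make([]byte, 20)...)
	raw := append([]byte("tree 32\x00"), entry...)

	nd, err := ipldgit.ParseObjectFromBuffer(raw)
	if err != nil {
		panic(err)
	}

	// The parsed object is content-addressed with a git-raw CID ...
	fmt.Println("cid:", nd.Cid())

	// ... and paths resolve through the IPLD node interface, here down to the
	// node.Link wrapping the entry's hash.
	lnk, rest, err := nd.Resolve([]string{"file", "hash"})
	if err != nil {
		panic(err)
	}
	fmt.Println("link:", lnk, "remaining path:", rest)
}
// ---------------------------------------------------------------------------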
+var _ node.Node = (*Tree)(nil) diff --git a/vendor/github.com/mmcloughlin/avo/LICENSE b/vendor/github.com/mmcloughlin/avo/LICENSE new file mode 100644 index 00000000..c986d807 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Michael McLoughlin +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mmcloughlin/avo/attr/attr.go b/vendor/github.com/mmcloughlin/avo/attr/attr.go new file mode 100644 index 00000000..9173773f --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/attr/attr.go @@ -0,0 +1,97 @@ +// Package attr provides attributes for text and data sections. +package attr + +import ( + "fmt" + "math/bits" + "strings" +) + +// Attribute represents TEXT or DATA flags. +type Attribute uint16 + +// Reference: https://github.com/golang/go/blob/35f4ec152b44ae5fc83aaf68e2eb3aa1a778e5cd/src/runtime/textflag.h#L11-L34 +// +// // Don't profile the marked routine. This flag is deprecated. +// #define NOPROF 1 +// // It is ok for the linker to get multiple of these symbols. It will +// // pick one of the duplicates to use. +// #define DUPOK 2 +// // Don't insert stack check preamble. +// #define NOSPLIT 4 +// // Put this data in a read-only section. +// #define RODATA 8 +// // This data contains no pointers. +// #define NOPTR 16 +// // This is a wrapper function and should not count as disabling 'recover'. +// #define WRAPPER 32 +// // This function uses its incoming context register. +// #define NEEDCTXT 64 +// // Allocate a word of thread local storage and store the offset from the +// // thread local base to the thread local storage in this variable. +// #define TLSBSS 256 +// // Do not insert instructions to allocate a stack frame for this function. +// // Only valid on functions that declare a frame size of 0. +// // TODO(mwhudson): only implemented for ppc64x at present. +// #define NOFRAME 512 +// // Function can call reflect.Type.Method or reflect.Type.MethodByName. 
+// #define REFLECTMETHOD 1024 +// +const ( + NOPROF Attribute = 1 << iota + DUPOK + NOSPLIT + RODATA + NOPTR + WRAPPER + NEEDCTXT + _ + TLSBSS + NOFRAME + REFLECTMETHOD +) + +// Asm returns a representation of the attributes in assembly syntax. This may use macros from "textflags.h"; see ContainsTextFlags() to determine if this header is required. +func (a Attribute) Asm() string { + parts, rest := a.split() + if len(parts) == 0 || rest != 0 { + parts = append(parts, fmt.Sprintf("%d", rest)) + } + return strings.Join(parts, "|") +} + +// ContainsTextFlags returns whether the Asm() representation requires macros in "textflags.h". +func (a Attribute) ContainsTextFlags() bool { + flags, _ := a.split() + return len(flags) > 0 +} + +// split splits a into known flags and any remaining bits. +func (a Attribute) split() ([]string, Attribute) { + var flags []string + var rest Attribute + for a != 0 { + i := uint(bits.TrailingZeros16(uint16(a))) + bit := Attribute(1) << i + if flag := attrname[bit]; flag != "" { + flags = append(flags, flag) + } else { + rest |= bit + } + a ^= bit + } + return flags, rest +} + +var attrname = map[Attribute]string{ + NOPROF: "NOPROF", + DUPOK: "DUPOK", + NOSPLIT: "NOSPLIT", + RODATA: "RODATA", + NOPTR: "NOPTR", + WRAPPER: "WRAPPER", + NEEDCTXT: "NEEDCTXT", + TLSBSS: "TLSBSS", + NOFRAME: "NOFRAME", + // REFLECTMETHOD excluded due to https://golang.org/issue/29487 +} diff --git a/vendor/github.com/mmcloughlin/avo/build/attr.go b/vendor/github.com/mmcloughlin/avo/build/attr.go new file mode 100644 index 00000000..d337537c --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/attr.go @@ -0,0 +1,17 @@ +package build + +import "github.com/mmcloughlin/avo/attr" + +// TEXT and DATA attribute values included for convenience. +const ( + NOPROF = attr.NOPROF + DUPOK = attr.DUPOK + NOSPLIT = attr.NOSPLIT + RODATA = attr.RODATA + NOPTR = attr.NOPTR + WRAPPER = attr.WRAPPER + NEEDCTXT = attr.NEEDCTXT + TLSBSS = attr.TLSBSS + NOFRAME = attr.NOFRAME + REFLECTMETHOD = attr.REFLECTMETHOD +) diff --git a/vendor/github.com/mmcloughlin/avo/build/cli.go b/vendor/github.com/mmcloughlin/avo/build/cli.go new file mode 100644 index 00000000..7474cbca --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/cli.go @@ -0,0 +1,165 @@ +package build + +import ( + "flag" + "io" + "log" + "os" + "runtime/pprof" + + "github.com/mmcloughlin/avo/pass" + "github.com/mmcloughlin/avo/printer" +) + +// Config contains options for an avo main function. +type Config struct { + ErrOut io.Writer + MaxErrors int // max errors to report; 0 means unlimited + CPUProfile io.WriteCloser + Passes []pass.Interface +} + +// Main is the standard main function for an avo program. This extracts the +// result from the build Context (logging and exiting on error), and performs +// configured passes. +func Main(cfg *Config, context *Context) int { + diag := log.New(cfg.ErrOut, "", 0) + + if cfg.CPUProfile != nil { + defer cfg.CPUProfile.Close() + if err := pprof.StartCPUProfile(cfg.CPUProfile); err != nil { + diag.Println("could not start CPU profile: ", err) + return 1 + } + defer pprof.StopCPUProfile() + } + + f, err := context.Result() + if err != nil { + LogError(diag, err, cfg.MaxErrors) + return 1 + } + + p := pass.Concat(cfg.Passes...) + if err := p.Execute(f); err != nil { + diag.Println(err) + return 1 + } + + return 0 +} + +// Flags represents CLI flags for an avo program. 
+type Flags struct { + errout *outputValue + allerrors bool + cpuprof *outputValue + printers []*printerValue +} + +// NewFlags initializes avo flags for the given FlagSet. +func NewFlags(fs *flag.FlagSet) *Flags { + f := &Flags{} + + f.errout = newOutputValue(os.Stderr) + fs.Var(f.errout, "log", "diagnostics output") + + fs.BoolVar(&f.allerrors, "e", false, "no limit on number of errors reported") + + f.cpuprof = newOutputValue(nil) + fs.Var(f.cpuprof, "cpuprofile", "write cpu profile to `file`") + + goasm := newPrinterValue(printer.NewGoAsm, os.Stdout) + fs.Var(goasm, "out", "assembly output") + f.printers = append(f.printers, goasm) + + stubs := newPrinterValue(printer.NewStubs, nil) + fs.Var(stubs, "stubs", "go stub file") + f.printers = append(f.printers, stubs) + + return f +} + +// Config builds a configuration object based on flag values. +func (f *Flags) Config() *Config { + pc := printer.NewGoRunConfig() + passes := []pass.Interface{pass.Compile} + for _, pv := range f.printers { + p := pv.Build(pc) + if p != nil { + passes = append(passes, p) + } + } + + cfg := &Config{ + ErrOut: f.errout.w, + MaxErrors: 10, + CPUProfile: f.cpuprof.w, + Passes: passes, + } + + if f.allerrors { + cfg.MaxErrors = 0 + } + + return cfg +} + +type outputValue struct { + w io.WriteCloser + filename string +} + +func newOutputValue(dflt io.WriteCloser) *outputValue { + return &outputValue{w: dflt} +} + +func (o *outputValue) String() string { + if o == nil { + return "" + } + return o.filename +} + +func (o *outputValue) Set(s string) error { + o.filename = s + if s == "-" { + o.w = nopwritecloser{os.Stdout} + return nil + } + f, err := os.Create(s) + if err != nil { + return err + } + o.w = f + return nil +} + +type printerValue struct { + *outputValue + Builder printer.Builder +} + +func newPrinterValue(b printer.Builder, dflt io.WriteCloser) *printerValue { + return &printerValue{ + outputValue: newOutputValue(dflt), + Builder: b, + } +} + +func (p *printerValue) Build(cfg printer.Config) pass.Interface { + if p.outputValue.w == nil { + return nil + } + return &pass.Output{ + Writer: p.outputValue.w, + Printer: p.Builder(cfg), + } +} + +// nopwritecloser wraps a Writer and provides a null implementation of Close(). +type nopwritecloser struct { + io.Writer +} + +func (nopwritecloser) Close() error { return nil } diff --git a/vendor/github.com/mmcloughlin/avo/build/context.go b/vendor/github.com/mmcloughlin/avo/build/context.go new file mode 100644 index 00000000..fb00b4ee --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/context.go @@ -0,0 +1,217 @@ +package build + +import ( + "errors" + "fmt" + "go/types" + + "github.com/mmcloughlin/avo/attr" + "github.com/mmcloughlin/avo/buildtags" + "github.com/mmcloughlin/avo/gotypes" + "github.com/mmcloughlin/avo/ir" + "github.com/mmcloughlin/avo/operand" + "github.com/mmcloughlin/avo/reg" + "golang.org/x/tools/go/packages" +) + +// Context maintains state for incrementally building an avo File. +type Context struct { + pkg *packages.Package + file *ir.File + function *ir.Function + global *ir.Global + errs ErrorList + reg.Collection +} + +// NewContext initializes an empty build Context. +func NewContext() *Context { + return &Context{ + file: ir.NewFile(), + Collection: *reg.NewCollection(), + } +} + +// Package sets the package the generated file will belong to. Required to be able to reference types in the package. 
+func (c *Context) Package(path string) { + cfg := &packages.Config{ + Mode: packages.LoadAllSyntax, + } + pkgs, err := packages.Load(cfg, path) + if err != nil { + c.adderror(err) + return + } + pkg := pkgs[0] + if len(pkg.Errors) > 0 { + for _, err := range pkg.Errors { + c.adderror(err) + } + return + } + c.pkg = pkg +} + +// Constraints sets build constraints for the file. +func (c *Context) Constraints(t buildtags.ConstraintsConvertable) { + cs := t.ToConstraints() + if err := cs.Validate(); err != nil { + c.adderror(err) + return + } + c.file.Constraints = cs +} + +// Constraint appends a constraint to the file's build constraints. +func (c *Context) Constraint(t buildtags.ConstraintConvertable) { + c.Constraints(append(c.file.Constraints, t.ToConstraint())) +} + +// ConstraintExpr appends a constraint to the file's build constraints. The +// constraint to add is parsed from the given expression. The expression should +// look the same as the content following "// +build " in regular build +// constraint comments. +func (c *Context) ConstraintExpr(expr string) { + constraint, err := buildtags.ParseConstraint(expr) + if err != nil { + c.adderror(err) + return + } + c.Constraint(constraint) +} + +// Function starts building a new function with the given name. +func (c *Context) Function(name string) { + c.function = ir.NewFunction(name) + c.file.AddSection(c.function) +} + +// Doc sets documentation comment lines for the currently active function. +func (c *Context) Doc(lines ...string) { + c.activefunc().Doc = lines +} + +// Attributes sets function attributes for the currently active function. +func (c *Context) Attributes(a attr.Attribute) { + c.activefunc().Attributes = a +} + +// Signature sets the signature for the currently active function. +func (c *Context) Signature(s *gotypes.Signature) { + c.activefunc().SetSignature(s) +} + +// SignatureExpr parses the signature expression and sets it as the active function's signature. +func (c *Context) SignatureExpr(expr string) { + s, err := gotypes.ParseSignatureInPackage(c.types(), expr) + if err != nil { + c.adderror(err) + return + } + c.Signature(s) +} + +// Implement starts building a function of the given name, whose type is +// specified by a stub in the containing package. +func (c *Context) Implement(name string) { + pkg := c.types() + if pkg == nil { + c.adderrormessage("no package specified") + return + } + s, err := gotypes.LookupSignature(pkg, name) + if err != nil { + c.adderror(err) + return + } + c.Function(name) + c.Signature(s) +} + +func (c *Context) types() *types.Package { + if c.pkg == nil { + return nil + } + return c.pkg.Types +} + +// AllocLocal allocates size bytes in the stack of the currently active function. +// Returns a reference to the base pointer for the newly allocated region. +func (c *Context) AllocLocal(size int) operand.Mem { + return c.activefunc().AllocLocal(size) +} + +// Instruction adds an instruction to the active function. +func (c *Context) Instruction(i *ir.Instruction) { + c.activefunc().AddInstruction(i) +} + +// Label adds a label to the active function. +func (c *Context) Label(name string) { + c.activefunc().AddLabel(ir.Label(name)) +} + +// Comment adds comment lines to the active function. +func (c *Context) Comment(lines ...string) { + c.activefunc().AddComment(lines...) +} + +// Commentf adds a formtted comment line. 
+func (c *Context) Commentf(format string, a ...interface{}) { + c.Comment(fmt.Sprintf(format, a...)) +} + +func (c *Context) activefunc() *ir.Function { + if c.function == nil { + c.adderrormessage("no active function") + return ir.NewFunction("") + } + return c.function +} + +//go:generate avogen -output zinstructions.go build + +// StaticGlobal adds a new static data section to the file and returns a pointer to it. +func (c *Context) StaticGlobal(name string) operand.Mem { + c.global = ir.NewStaticGlobal(name) + c.file.AddSection(c.global) + return c.global.Base() +} + +// DataAttributes sets the attributes on the current active global data section. +func (c *Context) DataAttributes(a attr.Attribute) { + c.activeglobal().Attributes = a +} + +// AddDatum adds constant v at offset to the current active global data section. +func (c *Context) AddDatum(offset int, v operand.Constant) { + if err := c.activeglobal().AddDatum(ir.NewDatum(offset, v)); err != nil { + c.adderror(err) + } +} + +// AppendDatum appends a constant to the current active global data section. +func (c *Context) AppendDatum(v operand.Constant) { + c.activeglobal().Append(v) +} + +func (c *Context) activeglobal() *ir.Global { + if c.global == nil { + c.adderrormessage("no active global") + return ir.NewStaticGlobal("") + } + return c.global +} + +func (c *Context) adderror(err error) { + c.errs.addext(err) +} + +func (c *Context) adderrormessage(msg string) { + c.adderror(errors.New(msg)) +} + +// Result returns the built file and any accumulated errors. +func (c *Context) Result() (*ir.File, error) { + return c.file, c.errs.Err() +} diff --git a/vendor/github.com/mmcloughlin/avo/build/doc.go b/vendor/github.com/mmcloughlin/avo/build/doc.go new file mode 100644 index 00000000..8b9a6047 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/doc.go @@ -0,0 +1,2 @@ +// Package build provides an assembly-like interface for incremental building of avo Files. +package build diff --git a/vendor/github.com/mmcloughlin/avo/build/error.go b/vendor/github.com/mmcloughlin/avo/build/error.go new file mode 100644 index 00000000..1da00cbf --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/error.go @@ -0,0 +1,88 @@ +package build + +import ( + "fmt" + "log" + + "github.com/mmcloughlin/avo/internal/stack" + "github.com/mmcloughlin/avo/src" +) + +// Error represents an error during building, optionally tagged with the position at which it happened. +type Error struct { + Position src.Position + Err error +} + +// exterr constructs an Error with position derived from the first frame in the +// call stack outside this package. +func exterr(err error) Error { + e := Error{Err: err} + if f := stack.ExternalCaller(); f != nil { + e.Position = src.FramePosition(*f).Relwd() + } + return e +} + +func (e Error) Error() string { + msg := e.Err.Error() + if e.Position.IsValid() { + return e.Position.String() + ": " + msg + } + return msg +} + +// ErrorList is a collection of errors for a source file. +type ErrorList []Error + +// Add appends an error to the list. +func (e *ErrorList) Add(err Error) { + *e = append(*e, err) +} + +// AddAt appends an error at position p. +func (e *ErrorList) AddAt(p src.Position, err error) { + e.Add(Error{p, err}) +} + +// addext appends an error to the list, tagged with the +func (e *ErrorList) addext(err error) { + e.Add(exterr(err)) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. 
+func (e ErrorList) Err() error { + if len(e) == 0 { + return nil + } + return e +} + +// An ErrorList implements the error interface. +func (e ErrorList) Error() string { + switch len(e) { + case 0: + return "no errors" + case 1: + return e[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", e[0], len(e)-1) +} + +// LogError logs a list of errors, one error per line, if the err parameter is +// an ErrorList. Otherwise it just logs the err string. Reports at most max +// errors, or unlimited if max is 0. +func LogError(l *log.Logger, err error, max int) { + if list, ok := err.(ErrorList); ok { + for i, e := range list { + if max > 0 && i == max { + l.Print("too many errors") + return + } + l.Printf("%s\n", e) + } + } else if err != nil { + l.Printf("%s\n", err) + } +} diff --git a/vendor/github.com/mmcloughlin/avo/build/global.go b/vendor/github.com/mmcloughlin/avo/build/global.go new file mode 100644 index 00000000..148b90cf --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/global.go @@ -0,0 +1,142 @@ +package build + +import ( + "flag" + "os" + + "github.com/mmcloughlin/avo/attr" + "github.com/mmcloughlin/avo/buildtags" + "github.com/mmcloughlin/avo/gotypes" + "github.com/mmcloughlin/avo/operand" + + "github.com/mmcloughlin/avo/reg" +) + +// ctx provides a global build context. +var ctx = NewContext() + +// TEXT starts building a new function called name, with attributes a, and sets its signature (see SignatureExpr). +func TEXT(name string, a attr.Attribute, signature string) { + ctx.Function(name) + ctx.Attributes(a) + ctx.SignatureExpr(signature) +} + +// GLOBL declares a new static global data section with the given attributes. +func GLOBL(name string, a attr.Attribute) operand.Mem { + // TODO(mbm): should this be static? + g := ctx.StaticGlobal(name) + ctx.DataAttributes(a) + return g +} + +// DATA adds a data value to the active data section. +func DATA(offset int, v operand.Constant) { + ctx.AddDatum(offset, v) +} + +var flags = NewFlags(flag.CommandLine) + +// Generate builds and compiles the avo file built with the global context. This +// should be the final line of any avo program. Configuration is determined from command-line flags. +func Generate() { + if !flag.Parsed() { + flag.Parse() + } + cfg := flags.Config() + + status := Main(cfg, ctx) + + // To record coverage of integration tests we wrap main() functions in a test + // functions. In this case we need the main function to terminate, therefore we + // only exit for failure status codes. + if status != 0 { + os.Exit(status) + } +} + +// Package sets the package the generated file will belong to. Required to be able to reference types in the package. +func Package(path string) { ctx.Package(path) } + +// Constraints sets build constraints for the file. +func Constraints(t buildtags.ConstraintsConvertable) { ctx.Constraints(t) } + +// Constraint appends a constraint to the file's build constraints. +func Constraint(t buildtags.ConstraintConvertable) { ctx.Constraint(t) } + +// ConstraintExpr appends a constraint to the file's build constraints. The +// constraint to add is parsed from the given expression. The expression should +// look the same as the content following "// +build " in regular build +// constraint comments. +func ConstraintExpr(expr string) { ctx.ConstraintExpr(expr) } + +// GP8 allocates and returns a general-purpose 8-bit register. +func GP8() reg.GPVirtual { return ctx.GP8() } + +// GP16 allocates and returns a general-purpose 16-bit register. 
+func GP16() reg.GPVirtual { return ctx.GP16() } + +// GP32 allocates and returns a general-purpose 32-bit register. +func GP32() reg.GPVirtual { return ctx.GP32() } + +// GP64 allocates and returns a general-purpose 64-bit register. +func GP64() reg.GPVirtual { return ctx.GP64() } + +// XMM allocates and returns a 128-bit vector register. +func XMM() reg.VecVirtual { return ctx.XMM() } + +// YMM allocates and returns a 256-bit vector register. +func YMM() reg.VecVirtual { return ctx.YMM() } + +// ZMM allocates and returns a 512-bit vector register. +func ZMM() reg.VecVirtual { return ctx.ZMM() } + +// Param returns a the named argument of the active function. +func Param(name string) gotypes.Component { return ctx.Param(name) } + +// ParamIndex returns the ith argument of the active function. +func ParamIndex(i int) gotypes.Component { return ctx.ParamIndex(i) } + +// Return returns a the named return value of the active function. +func Return(name string) gotypes.Component { return ctx.Return(name) } + +// ReturnIndex returns the ith argument of the active function. +func ReturnIndex(i int) gotypes.Component { return ctx.ReturnIndex(i) } + +// Load the function argument src into register dst. Returns the destination +// register. This is syntactic sugar: it will attempt to select the right MOV +// instruction based on the types involved. +func Load(src gotypes.Component, dst reg.Register) reg.Register { return ctx.Load(src, dst) } + +// Store register src into return value dst. This is syntactic sugar: it will +// attempt to select the right MOV instruction based on the types involved. +func Store(src reg.Register, dst gotypes.Component) { ctx.Store(src, dst) } + +// Dereference loads a pointer and returns its element type. +func Dereference(ptr gotypes.Component) gotypes.Component { return ctx.Dereference(ptr) } + +// Doc sets documentation comment lines for the currently active function. +func Doc(lines ...string) { ctx.Doc(lines...) } + +// Attributes sets function attributes for the currently active function. +func Attributes(a attr.Attribute) { ctx.Attributes(a) } + +// Implement starts building a function of the given name, whose type is +// specified by a stub in the containing package. +func Implement(name string) { ctx.Implement(name) } + +// AllocLocal allocates size bytes in the stack of the currently active function. +// Returns a reference to the base pointer for the newly allocated region. +func AllocLocal(size int) operand.Mem { return ctx.AllocLocal(size) } + +// Label adds a label to the active function. +func Label(name string) { ctx.Label(name) } + +// Comment adds comment lines to the active function. +func Comment(lines ...string) { ctx.Comment(lines...) } + +// Commentf adds a formtted comment line. +func Commentf(format string, a ...interface{}) { ctx.Commentf(format, a...) } + +// ConstData builds a static data section containing just the given constant. 
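// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch of how the avo build DSL vendored here
// is typically driven; it is not part of the vendored files. It uses only
// functions that appear in this diff (TEXT, Doc, Param, GP64, Load, ADDQ,
// Store, ReturnIndex, Generate) plus RET, which comes from the generated
// zinstructions.go further below. Running such a program writes Go assembly to
// the -out flag (stdout by default) and declaration stubs to -stubs, per the
// flag wiring in build/cli.go above.
package main

import (
	. "github.com/mmcloughlin/avo/build"
)

func main() {
	TEXT("Add", NOSPLIT, "func(x, y uint64) uint64")
	Doc("Add returns the sum of x and y.")
	x := Load(Param("x"), GP64())
	y := Load(Param("y"), GP64())
	ADDQ(x, y) // y += x, selected from the ADDQ forms listed below
	Store(y, ReturnIndex(0))
	RET()
	Generate()
}
// ---------------------------------------------------------------------------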
+func ConstData(name string, v operand.Constant) operand.Mem { return ctx.ConstData(name, v) } diff --git a/vendor/github.com/mmcloughlin/avo/build/pseudo.go b/vendor/github.com/mmcloughlin/avo/build/pseudo.go new file mode 100644 index 00000000..83a570e4 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/pseudo.go @@ -0,0 +1,70 @@ +package build + +import ( + "github.com/mmcloughlin/avo/attr" + "github.com/mmcloughlin/avo/operand" + "github.com/mmcloughlin/avo/reg" + + "github.com/mmcloughlin/avo/gotypes" +) + +//go:generate avogen -output zmov.go mov + +// Param returns a the named argument of the active function. +func (c *Context) Param(name string) gotypes.Component { + return c.activefunc().Signature.Params().Lookup(name) +} + +// ParamIndex returns the ith argument of the active function. +func (c *Context) ParamIndex(i int) gotypes.Component { + return c.activefunc().Signature.Params().At(i) +} + +// Return returns a the named return value of the active function. +func (c *Context) Return(name string) gotypes.Component { + return c.activefunc().Signature.Results().Lookup(name) +} + +// ReturnIndex returns the ith argument of the active function. +func (c *Context) ReturnIndex(i int) gotypes.Component { + return c.activefunc().Signature.Results().At(i) +} + +// Load the function argument src into register dst. Returns the destination +// register. This is syntactic sugar: it will attempt to select the right MOV +// instruction based on the types involved. +func (c *Context) Load(src gotypes.Component, dst reg.Register) reg.Register { + b, err := src.Resolve() + if err != nil { + c.adderror(err) + return dst + } + c.mov(b.Addr, dst, int(gotypes.Sizes.Sizeof(b.Type)), int(dst.Size()), b.Type) + return dst +} + +// Store register src into return value dst. This is syntactic sugar: it will +// attempt to select the right MOV instruction based on the types involved. +func (c *Context) Store(src reg.Register, dst gotypes.Component) { + b, err := dst.Resolve() + if err != nil { + c.adderror(err) + return + } + c.mov(src, b.Addr, int(src.Size()), int(gotypes.Sizes.Sizeof(b.Type)), b.Type) +} + +// Dereference loads a pointer and returns its element type. +func (c *Context) Dereference(ptr gotypes.Component) gotypes.Component { + r := c.GP64() + c.Load(ptr, r) + return ptr.Dereference(r) +} + +// ConstData builds a static data section containing just the given constant. +func (c *Context) ConstData(name string, v operand.Constant) operand.Mem { + g := c.StaticGlobal(name) + c.DataAttributes(attr.RODATA | attr.NOPTR) + c.AppendDatum(v) + return g +} diff --git a/vendor/github.com/mmcloughlin/avo/build/zinstructions.go b/vendor/github.com/mmcloughlin/avo/build/zinstructions.go new file mode 100644 index 00000000..33c2085e --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/zinstructions.go @@ -0,0 +1,26315 @@ +// Code generated by command: avogen -output zinstructions.go build. DO NOT EDIT. + +package build + +import ( + "github.com/mmcloughlin/avo/operand" + "github.com/mmcloughlin/avo/x86" +) + +// ADCB: Add with Carry. +// +// Forms: +// +// ADCB imm8 al +// ADCB imm8 r8 +// ADCB r8 r8 +// ADCB m8 r8 +// ADCB imm8 m8 +// ADCB r8 m8 +// Construct and append a ADCB instruction to the active function. +func (c *Context) ADCB(imr, amr operand.Op) { + if inst, err := x86.ADCB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADCB: Add with Carry. 
+// +// Forms: +// +// ADCB imm8 al +// ADCB imm8 r8 +// ADCB r8 r8 +// ADCB m8 r8 +// ADCB imm8 m8 +// ADCB r8 m8 +// Construct and append a ADCB instruction to the active function. +// Operates on the global context. +func ADCB(imr, amr operand.Op) { ctx.ADCB(imr, amr) } + +// ADCL: Add with Carry. +// +// Forms: +// +// ADCL imm32 eax +// ADCL imm8 r32 +// ADCL imm32 r32 +// ADCL r32 r32 +// ADCL m32 r32 +// ADCL imm8 m32 +// ADCL imm32 m32 +// ADCL r32 m32 +// Construct and append a ADCL instruction to the active function. +func (c *Context) ADCL(imr, emr operand.Op) { + if inst, err := x86.ADCL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADCL: Add with Carry. +// +// Forms: +// +// ADCL imm32 eax +// ADCL imm8 r32 +// ADCL imm32 r32 +// ADCL r32 r32 +// ADCL m32 r32 +// ADCL imm8 m32 +// ADCL imm32 m32 +// ADCL r32 m32 +// Construct and append a ADCL instruction to the active function. +// Operates on the global context. +func ADCL(imr, emr operand.Op) { ctx.ADCL(imr, emr) } + +// ADCQ: Add with Carry. +// +// Forms: +// +// ADCQ imm32 rax +// ADCQ imm8 r64 +// ADCQ imm32 r64 +// ADCQ r64 r64 +// ADCQ m64 r64 +// ADCQ imm8 m64 +// ADCQ imm32 m64 +// ADCQ r64 m64 +// Construct and append a ADCQ instruction to the active function. +func (c *Context) ADCQ(imr, mr operand.Op) { + if inst, err := x86.ADCQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADCQ: Add with Carry. +// +// Forms: +// +// ADCQ imm32 rax +// ADCQ imm8 r64 +// ADCQ imm32 r64 +// ADCQ r64 r64 +// ADCQ m64 r64 +// ADCQ imm8 m64 +// ADCQ imm32 m64 +// ADCQ r64 m64 +// Construct and append a ADCQ instruction to the active function. +// Operates on the global context. +func ADCQ(imr, mr operand.Op) { ctx.ADCQ(imr, mr) } + +// ADCW: Add with Carry. +// +// Forms: +// +// ADCW imm16 ax +// ADCW imm8 r16 +// ADCW imm16 r16 +// ADCW r16 r16 +// ADCW m16 r16 +// ADCW imm8 m16 +// ADCW imm16 m16 +// ADCW r16 m16 +// Construct and append a ADCW instruction to the active function. +func (c *Context) ADCW(imr, amr operand.Op) { + if inst, err := x86.ADCW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADCW: Add with Carry. +// +// Forms: +// +// ADCW imm16 ax +// ADCW imm8 r16 +// ADCW imm16 r16 +// ADCW r16 r16 +// ADCW m16 r16 +// ADCW imm8 m16 +// ADCW imm16 m16 +// ADCW r16 m16 +// Construct and append a ADCW instruction to the active function. +// Operates on the global context. +func ADCW(imr, amr operand.Op) { ctx.ADCW(imr, amr) } + +// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag. +// +// Forms: +// +// ADCXL r32 r32 +// ADCXL m32 r32 +// Construct and append a ADCXL instruction to the active function. +func (c *Context) ADCXL(mr, r operand.Op) { + if inst, err := x86.ADCXL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag. +// +// Forms: +// +// ADCXL r32 r32 +// ADCXL m32 r32 +// Construct and append a ADCXL instruction to the active function. +// Operates on the global context. +func ADCXL(mr, r operand.Op) { ctx.ADCXL(mr, r) } + +// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag. +// +// Forms: +// +// ADCXQ r64 r64 +// ADCXQ m64 r64 +// Construct and append a ADCXQ instruction to the active function. 
+func (c *Context) ADCXQ(mr, r operand.Op) { + if inst, err := x86.ADCXQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag. +// +// Forms: +// +// ADCXQ r64 r64 +// ADCXQ m64 r64 +// Construct and append a ADCXQ instruction to the active function. +// Operates on the global context. +func ADCXQ(mr, r operand.Op) { ctx.ADCXQ(mr, r) } + +// ADDB: Add. +// +// Forms: +// +// ADDB imm8 al +// ADDB imm8 r8 +// ADDB r8 r8 +// ADDB m8 r8 +// ADDB imm8 m8 +// ADDB r8 m8 +// Construct and append a ADDB instruction to the active function. +func (c *Context) ADDB(imr, amr operand.Op) { + if inst, err := x86.ADDB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDB: Add. +// +// Forms: +// +// ADDB imm8 al +// ADDB imm8 r8 +// ADDB r8 r8 +// ADDB m8 r8 +// ADDB imm8 m8 +// ADDB r8 m8 +// Construct and append a ADDB instruction to the active function. +// Operates on the global context. +func ADDB(imr, amr operand.Op) { ctx.ADDB(imr, amr) } + +// ADDL: Add. +// +// Forms: +// +// ADDL imm32 eax +// ADDL imm8 r32 +// ADDL imm32 r32 +// ADDL r32 r32 +// ADDL m32 r32 +// ADDL imm8 m32 +// ADDL imm32 m32 +// ADDL r32 m32 +// Construct and append a ADDL instruction to the active function. +func (c *Context) ADDL(imr, emr operand.Op) { + if inst, err := x86.ADDL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDL: Add. +// +// Forms: +// +// ADDL imm32 eax +// ADDL imm8 r32 +// ADDL imm32 r32 +// ADDL r32 r32 +// ADDL m32 r32 +// ADDL imm8 m32 +// ADDL imm32 m32 +// ADDL r32 m32 +// Construct and append a ADDL instruction to the active function. +// Operates on the global context. +func ADDL(imr, emr operand.Op) { ctx.ADDL(imr, emr) } + +// ADDPD: Add Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ADDPD xmm xmm +// ADDPD m128 xmm +// Construct and append a ADDPD instruction to the active function. +func (c *Context) ADDPD(mx, x operand.Op) { + if inst, err := x86.ADDPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDPD: Add Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ADDPD xmm xmm +// ADDPD m128 xmm +// Construct and append a ADDPD instruction to the active function. +// Operates on the global context. +func ADDPD(mx, x operand.Op) { ctx.ADDPD(mx, x) } + +// ADDPS: Add Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ADDPS xmm xmm +// ADDPS m128 xmm +// Construct and append a ADDPS instruction to the active function. +func (c *Context) ADDPS(mx, x operand.Op) { + if inst, err := x86.ADDPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDPS: Add Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ADDPS xmm xmm +// ADDPS m128 xmm +// Construct and append a ADDPS instruction to the active function. +// Operates on the global context. +func ADDPS(mx, x operand.Op) { ctx.ADDPS(mx, x) } + +// ADDQ: Add. +// +// Forms: +// +// ADDQ imm32 rax +// ADDQ imm8 r64 +// ADDQ imm32 r64 +// ADDQ r64 r64 +// ADDQ m64 r64 +// ADDQ imm8 m64 +// ADDQ imm32 m64 +// ADDQ r64 m64 +// Construct and append a ADDQ instruction to the active function. +func (c *Context) ADDQ(imr, mr operand.Op) { + if inst, err := x86.ADDQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDQ: Add. 
+// +// Forms: +// +// ADDQ imm32 rax +// ADDQ imm8 r64 +// ADDQ imm32 r64 +// ADDQ r64 r64 +// ADDQ m64 r64 +// ADDQ imm8 m64 +// ADDQ imm32 m64 +// ADDQ r64 m64 +// Construct and append a ADDQ instruction to the active function. +// Operates on the global context. +func ADDQ(imr, mr operand.Op) { ctx.ADDQ(imr, mr) } + +// ADDSD: Add Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// ADDSD xmm xmm +// ADDSD m64 xmm +// Construct and append a ADDSD instruction to the active function. +func (c *Context) ADDSD(mx, x operand.Op) { + if inst, err := x86.ADDSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDSD: Add Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// ADDSD xmm xmm +// ADDSD m64 xmm +// Construct and append a ADDSD instruction to the active function. +// Operates on the global context. +func ADDSD(mx, x operand.Op) { ctx.ADDSD(mx, x) } + +// ADDSS: Add Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// ADDSS xmm xmm +// ADDSS m32 xmm +// Construct and append a ADDSS instruction to the active function. +func (c *Context) ADDSS(mx, x operand.Op) { + if inst, err := x86.ADDSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDSS: Add Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// ADDSS xmm xmm +// ADDSS m32 xmm +// Construct and append a ADDSS instruction to the active function. +// Operates on the global context. +func ADDSS(mx, x operand.Op) { ctx.ADDSS(mx, x) } + +// ADDSUBPD: Packed Double-FP Add/Subtract. +// +// Forms: +// +// ADDSUBPD xmm xmm +// ADDSUBPD m128 xmm +// Construct and append a ADDSUBPD instruction to the active function. +func (c *Context) ADDSUBPD(mx, x operand.Op) { + if inst, err := x86.ADDSUBPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDSUBPD: Packed Double-FP Add/Subtract. +// +// Forms: +// +// ADDSUBPD xmm xmm +// ADDSUBPD m128 xmm +// Construct and append a ADDSUBPD instruction to the active function. +// Operates on the global context. +func ADDSUBPD(mx, x operand.Op) { ctx.ADDSUBPD(mx, x) } + +// ADDSUBPS: Packed Single-FP Add/Subtract. +// +// Forms: +// +// ADDSUBPS xmm xmm +// ADDSUBPS m128 xmm +// Construct and append a ADDSUBPS instruction to the active function. +func (c *Context) ADDSUBPS(mx, x operand.Op) { + if inst, err := x86.ADDSUBPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDSUBPS: Packed Single-FP Add/Subtract. +// +// Forms: +// +// ADDSUBPS xmm xmm +// ADDSUBPS m128 xmm +// Construct and append a ADDSUBPS instruction to the active function. +// Operates on the global context. +func ADDSUBPS(mx, x operand.Op) { ctx.ADDSUBPS(mx, x) } + +// ADDW: Add. +// +// Forms: +// +// ADDW imm16 ax +// ADDW imm8 r16 +// ADDW imm16 r16 +// ADDW r16 r16 +// ADDW m16 r16 +// ADDW imm8 m16 +// ADDW imm16 m16 +// ADDW r16 m16 +// Construct and append a ADDW instruction to the active function. +func (c *Context) ADDW(imr, amr operand.Op) { + if inst, err := x86.ADDW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADDW: Add. +// +// Forms: +// +// ADDW imm16 ax +// ADDW imm8 r16 +// ADDW imm16 r16 +// ADDW r16 r16 +// ADDW m16 r16 +// ADDW imm8 m16 +// ADDW imm16 m16 +// ADDW r16 m16 +// Construct and append a ADDW instruction to the active function. +// Operates on the global context. 
+func ADDW(imr, amr operand.Op) { ctx.ADDW(imr, amr) } + +// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag. +// +// Forms: +// +// ADOXL r32 r32 +// ADOXL m32 r32 +// Construct and append a ADOXL instruction to the active function. +func (c *Context) ADOXL(mr, r operand.Op) { + if inst, err := x86.ADOXL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag. +// +// Forms: +// +// ADOXL r32 r32 +// ADOXL m32 r32 +// Construct and append a ADOXL instruction to the active function. +// Operates on the global context. +func ADOXL(mr, r operand.Op) { ctx.ADOXL(mr, r) } + +// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag. +// +// Forms: +// +// ADOXQ r64 r64 +// ADOXQ m64 r64 +// Construct and append a ADOXQ instruction to the active function. +func (c *Context) ADOXQ(mr, r operand.Op) { + if inst, err := x86.ADOXQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag. +// +// Forms: +// +// ADOXQ r64 r64 +// ADOXQ m64 r64 +// Construct and append a ADOXQ instruction to the active function. +// Operates on the global context. +func ADOXQ(mr, r operand.Op) { ctx.ADOXQ(mr, r) } + +// AESDEC: Perform One Round of an AES Decryption Flow. +// +// Forms: +// +// AESDEC xmm xmm +// AESDEC m128 xmm +// Construct and append a AESDEC instruction to the active function. +func (c *Context) AESDEC(mx, x operand.Op) { + if inst, err := x86.AESDEC(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// AESDEC: Perform One Round of an AES Decryption Flow. +// +// Forms: +// +// AESDEC xmm xmm +// AESDEC m128 xmm +// Construct and append a AESDEC instruction to the active function. +// Operates on the global context. +func AESDEC(mx, x operand.Op) { ctx.AESDEC(mx, x) } + +// AESDECLAST: Perform Last Round of an AES Decryption Flow. +// +// Forms: +// +// AESDECLAST xmm xmm +// AESDECLAST m128 xmm +// Construct and append a AESDECLAST instruction to the active function. +func (c *Context) AESDECLAST(mx, x operand.Op) { + if inst, err := x86.AESDECLAST(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// AESDECLAST: Perform Last Round of an AES Decryption Flow. +// +// Forms: +// +// AESDECLAST xmm xmm +// AESDECLAST m128 xmm +// Construct and append a AESDECLAST instruction to the active function. +// Operates on the global context. +func AESDECLAST(mx, x operand.Op) { ctx.AESDECLAST(mx, x) } + +// AESENC: Perform One Round of an AES Encryption Flow. +// +// Forms: +// +// AESENC xmm xmm +// AESENC m128 xmm +// Construct and append a AESENC instruction to the active function. +func (c *Context) AESENC(mx, x operand.Op) { + if inst, err := x86.AESENC(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// AESENC: Perform One Round of an AES Encryption Flow. +// +// Forms: +// +// AESENC xmm xmm +// AESENC m128 xmm +// Construct and append a AESENC instruction to the active function. +// Operates on the global context. +func AESENC(mx, x operand.Op) { ctx.AESENC(mx, x) } + +// AESENCLAST: Perform Last Round of an AES Encryption Flow. +// +// Forms: +// +// AESENCLAST xmm xmm +// AESENCLAST m128 xmm +// Construct and append a AESENCLAST instruction to the active function. 
+func (c *Context) AESENCLAST(mx, x operand.Op) { + if inst, err := x86.AESENCLAST(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// AESENCLAST: Perform Last Round of an AES Encryption Flow. +// +// Forms: +// +// AESENCLAST xmm xmm +// AESENCLAST m128 xmm +// Construct and append a AESENCLAST instruction to the active function. +// Operates on the global context. +func AESENCLAST(mx, x operand.Op) { ctx.AESENCLAST(mx, x) } + +// AESIMC: Perform the AES InvMixColumn Transformation. +// +// Forms: +// +// AESIMC xmm xmm +// AESIMC m128 xmm +// Construct and append a AESIMC instruction to the active function. +func (c *Context) AESIMC(mx, x operand.Op) { + if inst, err := x86.AESIMC(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// AESIMC: Perform the AES InvMixColumn Transformation. +// +// Forms: +// +// AESIMC xmm xmm +// AESIMC m128 xmm +// Construct and append a AESIMC instruction to the active function. +// Operates on the global context. +func AESIMC(mx, x operand.Op) { ctx.AESIMC(mx, x) } + +// AESKEYGENASSIST: AES Round Key Generation Assist. +// +// Forms: +// +// AESKEYGENASSIST imm8 xmm xmm +// AESKEYGENASSIST imm8 m128 xmm +// Construct and append a AESKEYGENASSIST instruction to the active function. +func (c *Context) AESKEYGENASSIST(i, mx, x operand.Op) { + if inst, err := x86.AESKEYGENASSIST(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// AESKEYGENASSIST: AES Round Key Generation Assist. +// +// Forms: +// +// AESKEYGENASSIST imm8 xmm xmm +// AESKEYGENASSIST imm8 m128 xmm +// Construct and append a AESKEYGENASSIST instruction to the active function. +// Operates on the global context. +func AESKEYGENASSIST(i, mx, x operand.Op) { ctx.AESKEYGENASSIST(i, mx, x) } + +// ANDB: Logical AND. +// +// Forms: +// +// ANDB imm8 al +// ANDB imm8 r8 +// ANDB r8 r8 +// ANDB m8 r8 +// ANDB imm8 m8 +// ANDB r8 m8 +// Construct and append a ANDB instruction to the active function. +func (c *Context) ANDB(imr, amr operand.Op) { + if inst, err := x86.ANDB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDB: Logical AND. +// +// Forms: +// +// ANDB imm8 al +// ANDB imm8 r8 +// ANDB r8 r8 +// ANDB m8 r8 +// ANDB imm8 m8 +// ANDB r8 m8 +// Construct and append a ANDB instruction to the active function. +// Operates on the global context. +func ANDB(imr, amr operand.Op) { ctx.ANDB(imr, amr) } + +// ANDL: Logical AND. +// +// Forms: +// +// ANDL imm32 eax +// ANDL imm8 r32 +// ANDL imm32 r32 +// ANDL r32 r32 +// ANDL m32 r32 +// ANDL imm8 m32 +// ANDL imm32 m32 +// ANDL r32 m32 +// Construct and append a ANDL instruction to the active function. +func (c *Context) ANDL(imr, emr operand.Op) { + if inst, err := x86.ANDL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDL: Logical AND. +// +// Forms: +// +// ANDL imm32 eax +// ANDL imm8 r32 +// ANDL imm32 r32 +// ANDL r32 r32 +// ANDL m32 r32 +// ANDL imm8 m32 +// ANDL imm32 m32 +// ANDL r32 m32 +// Construct and append a ANDL instruction to the active function. +// Operates on the global context. +func ANDL(imr, emr operand.Op) { ctx.ANDL(imr, emr) } + +// ANDNL: Logical AND NOT. +// +// Forms: +// +// ANDNL r32 r32 r32 +// ANDNL m32 r32 r32 +// Construct and append a ANDNL instruction to the active function. 
+func (c *Context) ANDNL(mr, r, r1 operand.Op) { + if inst, err := x86.ANDNL(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDNL: Logical AND NOT. +// +// Forms: +// +// ANDNL r32 r32 r32 +// ANDNL m32 r32 r32 +// Construct and append a ANDNL instruction to the active function. +// Operates on the global context. +func ANDNL(mr, r, r1 operand.Op) { ctx.ANDNL(mr, r, r1) } + +// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ANDNPD xmm xmm +// ANDNPD m128 xmm +// Construct and append a ANDNPD instruction to the active function. +func (c *Context) ANDNPD(mx, x operand.Op) { + if inst, err := x86.ANDNPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ANDNPD xmm xmm +// ANDNPD m128 xmm +// Construct and append a ANDNPD instruction to the active function. +// Operates on the global context. +func ANDNPD(mx, x operand.Op) { ctx.ANDNPD(mx, x) } + +// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ANDNPS xmm xmm +// ANDNPS m128 xmm +// Construct and append a ANDNPS instruction to the active function. +func (c *Context) ANDNPS(mx, x operand.Op) { + if inst, err := x86.ANDNPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ANDNPS xmm xmm +// ANDNPS m128 xmm +// Construct and append a ANDNPS instruction to the active function. +// Operates on the global context. +func ANDNPS(mx, x operand.Op) { ctx.ANDNPS(mx, x) } + +// ANDNQ: Logical AND NOT. +// +// Forms: +// +// ANDNQ r64 r64 r64 +// ANDNQ m64 r64 r64 +// Construct and append a ANDNQ instruction to the active function. +func (c *Context) ANDNQ(mr, r, r1 operand.Op) { + if inst, err := x86.ANDNQ(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDNQ: Logical AND NOT. +// +// Forms: +// +// ANDNQ r64 r64 r64 +// ANDNQ m64 r64 r64 +// Construct and append a ANDNQ instruction to the active function. +// Operates on the global context. +func ANDNQ(mr, r, r1 operand.Op) { ctx.ANDNQ(mr, r, r1) } + +// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ANDPD xmm xmm +// ANDPD m128 xmm +// Construct and append a ANDPD instruction to the active function. +func (c *Context) ANDPD(mx, x operand.Op) { + if inst, err := x86.ANDPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ANDPD xmm xmm +// ANDPD m128 xmm +// Construct and append a ANDPD instruction to the active function. +// Operates on the global context. +func ANDPD(mx, x operand.Op) { ctx.ANDPD(mx, x) } + +// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ANDPS xmm xmm +// ANDPS m128 xmm +// Construct and append a ANDPS instruction to the active function. +func (c *Context) ANDPS(mx, x operand.Op) { + if inst, err := x86.ANDPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// ANDPS xmm xmm +// ANDPS m128 xmm +// Construct and append a ANDPS instruction to the active function. +// Operates on the global context. +func ANDPS(mx, x operand.Op) { ctx.ANDPS(mx, x) } + +// ANDQ: Logical AND. +// +// Forms: +// +// ANDQ imm32 rax +// ANDQ imm8 r64 +// ANDQ imm32 r64 +// ANDQ r64 r64 +// ANDQ m64 r64 +// ANDQ imm8 m64 +// ANDQ imm32 m64 +// ANDQ r64 m64 +// Construct and append a ANDQ instruction to the active function. +func (c *Context) ANDQ(imr, mr operand.Op) { + if inst, err := x86.ANDQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDQ: Logical AND. +// +// Forms: +// +// ANDQ imm32 rax +// ANDQ imm8 r64 +// ANDQ imm32 r64 +// ANDQ r64 r64 +// ANDQ m64 r64 +// ANDQ imm8 m64 +// ANDQ imm32 m64 +// ANDQ r64 m64 +// Construct and append a ANDQ instruction to the active function. +// Operates on the global context. +func ANDQ(imr, mr operand.Op) { ctx.ANDQ(imr, mr) } + +// ANDW: Logical AND. +// +// Forms: +// +// ANDW imm16 ax +// ANDW imm8 r16 +// ANDW imm16 r16 +// ANDW r16 r16 +// ANDW m16 r16 +// ANDW imm8 m16 +// ANDW imm16 m16 +// ANDW r16 m16 +// Construct and append a ANDW instruction to the active function. +func (c *Context) ANDW(imr, amr operand.Op) { + if inst, err := x86.ANDW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ANDW: Logical AND. +// +// Forms: +// +// ANDW imm16 ax +// ANDW imm8 r16 +// ANDW imm16 r16 +// ANDW r16 r16 +// ANDW m16 r16 +// ANDW imm8 m16 +// ANDW imm16 m16 +// ANDW r16 m16 +// Construct and append a ANDW instruction to the active function. +// Operates on the global context. +func ANDW(imr, amr operand.Op) { ctx.ANDW(imr, amr) } + +// BEXTRL: Bit Field Extract. +// +// Forms: +// +// BEXTRL r32 r32 r32 +// BEXTRL r32 m32 r32 +// Construct and append a BEXTRL instruction to the active function. +func (c *Context) BEXTRL(r, mr, r1 operand.Op) { + if inst, err := x86.BEXTRL(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BEXTRL: Bit Field Extract. +// +// Forms: +// +// BEXTRL r32 r32 r32 +// BEXTRL r32 m32 r32 +// Construct and append a BEXTRL instruction to the active function. +// Operates on the global context. +func BEXTRL(r, mr, r1 operand.Op) { ctx.BEXTRL(r, mr, r1) } + +// BEXTRQ: Bit Field Extract. +// +// Forms: +// +// BEXTRQ r64 r64 r64 +// BEXTRQ r64 m64 r64 +// Construct and append a BEXTRQ instruction to the active function. +func (c *Context) BEXTRQ(r, mr, r1 operand.Op) { + if inst, err := x86.BEXTRQ(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BEXTRQ: Bit Field Extract. +// +// Forms: +// +// BEXTRQ r64 r64 r64 +// BEXTRQ r64 m64 r64 +// Construct and append a BEXTRQ instruction to the active function. +// Operates on the global context. +func BEXTRQ(r, mr, r1 operand.Op) { ctx.BEXTRQ(r, mr, r1) } + +// BLENDPD: Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// BLENDPD imm8 xmm xmm +// BLENDPD imm8 m128 xmm +// Construct and append a BLENDPD instruction to the active function. +func (c *Context) BLENDPD(i, mx, x operand.Op) { + if inst, err := x86.BLENDPD(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLENDPD: Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// BLENDPD imm8 xmm xmm +// BLENDPD imm8 m128 xmm +// Construct and append a BLENDPD instruction to the active function. +// Operates on the global context. 
+func BLENDPD(i, mx, x operand.Op) { ctx.BLENDPD(i, mx, x) } + +// BLENDPS: Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// BLENDPS imm8 xmm xmm +// BLENDPS imm8 m128 xmm +// Construct and append a BLENDPS instruction to the active function. +func (c *Context) BLENDPS(i, mx, x operand.Op) { + if inst, err := x86.BLENDPS(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLENDPS: Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// BLENDPS imm8 xmm xmm +// BLENDPS imm8 m128 xmm +// Construct and append a BLENDPS instruction to the active function. +// Operates on the global context. +func BLENDPS(i, mx, x operand.Op) { ctx.BLENDPS(i, mx, x) } + +// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// BLENDVPD xmm0 xmm xmm +// BLENDVPD xmm0 m128 xmm +// Construct and append a BLENDVPD instruction to the active function. +func (c *Context) BLENDVPD(x, mx, x1 operand.Op) { + if inst, err := x86.BLENDVPD(x, mx, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// BLENDVPD xmm0 xmm xmm +// BLENDVPD xmm0 m128 xmm +// Construct and append a BLENDVPD instruction to the active function. +// Operates on the global context. +func BLENDVPD(x, mx, x1 operand.Op) { ctx.BLENDVPD(x, mx, x1) } + +// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// BLENDVPS xmm0 xmm xmm +// BLENDVPS xmm0 m128 xmm +// Construct and append a BLENDVPS instruction to the active function. +func (c *Context) BLENDVPS(x, mx, x1 operand.Op) { + if inst, err := x86.BLENDVPS(x, mx, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// BLENDVPS xmm0 xmm xmm +// BLENDVPS xmm0 m128 xmm +// Construct and append a BLENDVPS instruction to the active function. +// Operates on the global context. +func BLENDVPS(x, mx, x1 operand.Op) { ctx.BLENDVPS(x, mx, x1) } + +// BLSIL: Isolate Lowest Set Bit. +// +// Forms: +// +// BLSIL r32 r32 +// BLSIL m32 r32 +// Construct and append a BLSIL instruction to the active function. +func (c *Context) BLSIL(mr, r operand.Op) { + if inst, err := x86.BLSIL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLSIL: Isolate Lowest Set Bit. +// +// Forms: +// +// BLSIL r32 r32 +// BLSIL m32 r32 +// Construct and append a BLSIL instruction to the active function. +// Operates on the global context. +func BLSIL(mr, r operand.Op) { ctx.BLSIL(mr, r) } + +// BLSIQ: Isolate Lowest Set Bit. +// +// Forms: +// +// BLSIQ r64 r64 +// BLSIQ m64 r64 +// Construct and append a BLSIQ instruction to the active function. +func (c *Context) BLSIQ(mr, r operand.Op) { + if inst, err := x86.BLSIQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLSIQ: Isolate Lowest Set Bit. +// +// Forms: +// +// BLSIQ r64 r64 +// BLSIQ m64 r64 +// Construct and append a BLSIQ instruction to the active function. +// Operates on the global context. +func BLSIQ(mr, r operand.Op) { ctx.BLSIQ(mr, r) } + +// BLSMSKL: Mask From Lowest Set Bit. +// +// Forms: +// +// BLSMSKL r32 r32 +// BLSMSKL m32 r32 +// Construct and append a BLSMSKL instruction to the active function. 
+func (c *Context) BLSMSKL(mr, r operand.Op) { + if inst, err := x86.BLSMSKL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLSMSKL: Mask From Lowest Set Bit. +// +// Forms: +// +// BLSMSKL r32 r32 +// BLSMSKL m32 r32 +// Construct and append a BLSMSKL instruction to the active function. +// Operates on the global context. +func BLSMSKL(mr, r operand.Op) { ctx.BLSMSKL(mr, r) } + +// BLSMSKQ: Mask From Lowest Set Bit. +// +// Forms: +// +// BLSMSKQ r64 r64 +// BLSMSKQ m64 r64 +// Construct and append a BLSMSKQ instruction to the active function. +func (c *Context) BLSMSKQ(mr, r operand.Op) { + if inst, err := x86.BLSMSKQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLSMSKQ: Mask From Lowest Set Bit. +// +// Forms: +// +// BLSMSKQ r64 r64 +// BLSMSKQ m64 r64 +// Construct and append a BLSMSKQ instruction to the active function. +// Operates on the global context. +func BLSMSKQ(mr, r operand.Op) { ctx.BLSMSKQ(mr, r) } + +// BLSRL: Reset Lowest Set Bit. +// +// Forms: +// +// BLSRL r32 r32 +// BLSRL m32 r32 +// Construct and append a BLSRL instruction to the active function. +func (c *Context) BLSRL(mr, r operand.Op) { + if inst, err := x86.BLSRL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLSRL: Reset Lowest Set Bit. +// +// Forms: +// +// BLSRL r32 r32 +// BLSRL m32 r32 +// Construct and append a BLSRL instruction to the active function. +// Operates on the global context. +func BLSRL(mr, r operand.Op) { ctx.BLSRL(mr, r) } + +// BLSRQ: Reset Lowest Set Bit. +// +// Forms: +// +// BLSRQ r64 r64 +// BLSRQ m64 r64 +// Construct and append a BLSRQ instruction to the active function. +func (c *Context) BLSRQ(mr, r operand.Op) { + if inst, err := x86.BLSRQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BLSRQ: Reset Lowest Set Bit. +// +// Forms: +// +// BLSRQ r64 r64 +// BLSRQ m64 r64 +// Construct and append a BLSRQ instruction to the active function. +// Operates on the global context. +func BLSRQ(mr, r operand.Op) { ctx.BLSRQ(mr, r) } + +// BSFL: Bit Scan Forward. +// +// Forms: +// +// BSFL r32 r32 +// BSFL m32 r32 +// Construct and append a BSFL instruction to the active function. +func (c *Context) BSFL(mr, r operand.Op) { + if inst, err := x86.BSFL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSFL: Bit Scan Forward. +// +// Forms: +// +// BSFL r32 r32 +// BSFL m32 r32 +// Construct and append a BSFL instruction to the active function. +// Operates on the global context. +func BSFL(mr, r operand.Op) { ctx.BSFL(mr, r) } + +// BSFQ: Bit Scan Forward. +// +// Forms: +// +// BSFQ r64 r64 +// BSFQ m64 r64 +// Construct and append a BSFQ instruction to the active function. +func (c *Context) BSFQ(mr, r operand.Op) { + if inst, err := x86.BSFQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSFQ: Bit Scan Forward. +// +// Forms: +// +// BSFQ r64 r64 +// BSFQ m64 r64 +// Construct and append a BSFQ instruction to the active function. +// Operates on the global context. +func BSFQ(mr, r operand.Op) { ctx.BSFQ(mr, r) } + +// BSFW: Bit Scan Forward. +// +// Forms: +// +// BSFW r16 r16 +// BSFW m16 r16 +// Construct and append a BSFW instruction to the active function. +func (c *Context) BSFW(mr, r operand.Op) { + if inst, err := x86.BSFW(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSFW: Bit Scan Forward. 
+// +// Forms: +// +// BSFW r16 r16 +// BSFW m16 r16 +// Construct and append a BSFW instruction to the active function. +// Operates on the global context. +func BSFW(mr, r operand.Op) { ctx.BSFW(mr, r) } + +// BSRL: Bit Scan Reverse. +// +// Forms: +// +// BSRL r32 r32 +// BSRL m32 r32 +// Construct and append a BSRL instruction to the active function. +func (c *Context) BSRL(mr, r operand.Op) { + if inst, err := x86.BSRL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSRL: Bit Scan Reverse. +// +// Forms: +// +// BSRL r32 r32 +// BSRL m32 r32 +// Construct and append a BSRL instruction to the active function. +// Operates on the global context. +func BSRL(mr, r operand.Op) { ctx.BSRL(mr, r) } + +// BSRQ: Bit Scan Reverse. +// +// Forms: +// +// BSRQ r64 r64 +// BSRQ m64 r64 +// Construct and append a BSRQ instruction to the active function. +func (c *Context) BSRQ(mr, r operand.Op) { + if inst, err := x86.BSRQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSRQ: Bit Scan Reverse. +// +// Forms: +// +// BSRQ r64 r64 +// BSRQ m64 r64 +// Construct and append a BSRQ instruction to the active function. +// Operates on the global context. +func BSRQ(mr, r operand.Op) { ctx.BSRQ(mr, r) } + +// BSRW: Bit Scan Reverse. +// +// Forms: +// +// BSRW r16 r16 +// BSRW m16 r16 +// Construct and append a BSRW instruction to the active function. +func (c *Context) BSRW(mr, r operand.Op) { + if inst, err := x86.BSRW(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSRW: Bit Scan Reverse. +// +// Forms: +// +// BSRW r16 r16 +// BSRW m16 r16 +// Construct and append a BSRW instruction to the active function. +// Operates on the global context. +func BSRW(mr, r operand.Op) { ctx.BSRW(mr, r) } + +// BSWAPL: Byte Swap. +// +// Forms: +// +// BSWAPL r32 +// Construct and append a BSWAPL instruction to the active function. +func (c *Context) BSWAPL(r operand.Op) { + if inst, err := x86.BSWAPL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSWAPL: Byte Swap. +// +// Forms: +// +// BSWAPL r32 +// Construct and append a BSWAPL instruction to the active function. +// Operates on the global context. +func BSWAPL(r operand.Op) { ctx.BSWAPL(r) } + +// BSWAPQ: Byte Swap. +// +// Forms: +// +// BSWAPQ r64 +// Construct and append a BSWAPQ instruction to the active function. +func (c *Context) BSWAPQ(r operand.Op) { + if inst, err := x86.BSWAPQ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BSWAPQ: Byte Swap. +// +// Forms: +// +// BSWAPQ r64 +// Construct and append a BSWAPQ instruction to the active function. +// Operates on the global context. +func BSWAPQ(r operand.Op) { ctx.BSWAPQ(r) } + +// BTCL: Bit Test and Complement. +// +// Forms: +// +// BTCL imm8 r32 +// BTCL r32 r32 +// BTCL imm8 m32 +// BTCL r32 m32 +// Construct and append a BTCL instruction to the active function. +func (c *Context) BTCL(ir, mr operand.Op) { + if inst, err := x86.BTCL(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTCL: Bit Test and Complement. +// +// Forms: +// +// BTCL imm8 r32 +// BTCL r32 r32 +// BTCL imm8 m32 +// BTCL r32 m32 +// Construct and append a BTCL instruction to the active function. +// Operates on the global context. +func BTCL(ir, mr operand.Op) { ctx.BTCL(ir, mr) } + +// BTCQ: Bit Test and Complement. 
+// +// Forms: +// +// BTCQ imm8 r64 +// BTCQ r64 r64 +// BTCQ imm8 m64 +// BTCQ r64 m64 +// Construct and append a BTCQ instruction to the active function. +func (c *Context) BTCQ(ir, mr operand.Op) { + if inst, err := x86.BTCQ(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTCQ: Bit Test and Complement. +// +// Forms: +// +// BTCQ imm8 r64 +// BTCQ r64 r64 +// BTCQ imm8 m64 +// BTCQ r64 m64 +// Construct and append a BTCQ instruction to the active function. +// Operates on the global context. +func BTCQ(ir, mr operand.Op) { ctx.BTCQ(ir, mr) } + +// BTCW: Bit Test and Complement. +// +// Forms: +// +// BTCW imm8 r16 +// BTCW r16 r16 +// BTCW imm8 m16 +// BTCW r16 m16 +// Construct and append a BTCW instruction to the active function. +func (c *Context) BTCW(ir, mr operand.Op) { + if inst, err := x86.BTCW(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTCW: Bit Test and Complement. +// +// Forms: +// +// BTCW imm8 r16 +// BTCW r16 r16 +// BTCW imm8 m16 +// BTCW r16 m16 +// Construct and append a BTCW instruction to the active function. +// Operates on the global context. +func BTCW(ir, mr operand.Op) { ctx.BTCW(ir, mr) } + +// BTL: Bit Test. +// +// Forms: +// +// BTL imm8 r32 +// BTL r32 r32 +// BTL imm8 m32 +// BTL r32 m32 +// Construct and append a BTL instruction to the active function. +func (c *Context) BTL(ir, mr operand.Op) { + if inst, err := x86.BTL(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTL: Bit Test. +// +// Forms: +// +// BTL imm8 r32 +// BTL r32 r32 +// BTL imm8 m32 +// BTL r32 m32 +// Construct and append a BTL instruction to the active function. +// Operates on the global context. +func BTL(ir, mr operand.Op) { ctx.BTL(ir, mr) } + +// BTQ: Bit Test. +// +// Forms: +// +// BTQ imm8 r64 +// BTQ r64 r64 +// BTQ imm8 m64 +// BTQ r64 m64 +// Construct and append a BTQ instruction to the active function. +func (c *Context) BTQ(ir, mr operand.Op) { + if inst, err := x86.BTQ(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTQ: Bit Test. +// +// Forms: +// +// BTQ imm8 r64 +// BTQ r64 r64 +// BTQ imm8 m64 +// BTQ r64 m64 +// Construct and append a BTQ instruction to the active function. +// Operates on the global context. +func BTQ(ir, mr operand.Op) { ctx.BTQ(ir, mr) } + +// BTRL: Bit Test and Reset. +// +// Forms: +// +// BTRL imm8 r32 +// BTRL r32 r32 +// BTRL imm8 m32 +// BTRL r32 m32 +// Construct and append a BTRL instruction to the active function. +func (c *Context) BTRL(ir, mr operand.Op) { + if inst, err := x86.BTRL(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTRL: Bit Test and Reset. +// +// Forms: +// +// BTRL imm8 r32 +// BTRL r32 r32 +// BTRL imm8 m32 +// BTRL r32 m32 +// Construct and append a BTRL instruction to the active function. +// Operates on the global context. +func BTRL(ir, mr operand.Op) { ctx.BTRL(ir, mr) } + +// BTRQ: Bit Test and Reset. +// +// Forms: +// +// BTRQ imm8 r64 +// BTRQ r64 r64 +// BTRQ imm8 m64 +// BTRQ r64 m64 +// Construct and append a BTRQ instruction to the active function. +func (c *Context) BTRQ(ir, mr operand.Op) { + if inst, err := x86.BTRQ(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTRQ: Bit Test and Reset. +// +// Forms: +// +// BTRQ imm8 r64 +// BTRQ r64 r64 +// BTRQ imm8 m64 +// BTRQ r64 m64 +// Construct and append a BTRQ instruction to the active function. 
+// Operates on the global context. +func BTRQ(ir, mr operand.Op) { ctx.BTRQ(ir, mr) } + +// BTRW: Bit Test and Reset. +// +// Forms: +// +// BTRW imm8 r16 +// BTRW r16 r16 +// BTRW imm8 m16 +// BTRW r16 m16 +// Construct and append a BTRW instruction to the active function. +func (c *Context) BTRW(ir, mr operand.Op) { + if inst, err := x86.BTRW(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTRW: Bit Test and Reset. +// +// Forms: +// +// BTRW imm8 r16 +// BTRW r16 r16 +// BTRW imm8 m16 +// BTRW r16 m16 +// Construct and append a BTRW instruction to the active function. +// Operates on the global context. +func BTRW(ir, mr operand.Op) { ctx.BTRW(ir, mr) } + +// BTSL: Bit Test and Set. +// +// Forms: +// +// BTSL imm8 r32 +// BTSL r32 r32 +// BTSL imm8 m32 +// BTSL r32 m32 +// Construct and append a BTSL instruction to the active function. +func (c *Context) BTSL(ir, mr operand.Op) { + if inst, err := x86.BTSL(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTSL: Bit Test and Set. +// +// Forms: +// +// BTSL imm8 r32 +// BTSL r32 r32 +// BTSL imm8 m32 +// BTSL r32 m32 +// Construct and append a BTSL instruction to the active function. +// Operates on the global context. +func BTSL(ir, mr operand.Op) { ctx.BTSL(ir, mr) } + +// BTSQ: Bit Test and Set. +// +// Forms: +// +// BTSQ imm8 r64 +// BTSQ r64 r64 +// BTSQ imm8 m64 +// BTSQ r64 m64 +// Construct and append a BTSQ instruction to the active function. +func (c *Context) BTSQ(ir, mr operand.Op) { + if inst, err := x86.BTSQ(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTSQ: Bit Test and Set. +// +// Forms: +// +// BTSQ imm8 r64 +// BTSQ r64 r64 +// BTSQ imm8 m64 +// BTSQ r64 m64 +// Construct and append a BTSQ instruction to the active function. +// Operates on the global context. +func BTSQ(ir, mr operand.Op) { ctx.BTSQ(ir, mr) } + +// BTSW: Bit Test and Set. +// +// Forms: +// +// BTSW imm8 r16 +// BTSW r16 r16 +// BTSW imm8 m16 +// BTSW r16 m16 +// Construct and append a BTSW instruction to the active function. +func (c *Context) BTSW(ir, mr operand.Op) { + if inst, err := x86.BTSW(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTSW: Bit Test and Set. +// +// Forms: +// +// BTSW imm8 r16 +// BTSW r16 r16 +// BTSW imm8 m16 +// BTSW r16 m16 +// Construct and append a BTSW instruction to the active function. +// Operates on the global context. +func BTSW(ir, mr operand.Op) { ctx.BTSW(ir, mr) } + +// BTW: Bit Test. +// +// Forms: +// +// BTW imm8 r16 +// BTW r16 r16 +// BTW imm8 m16 +// BTW r16 m16 +// Construct and append a BTW instruction to the active function. +func (c *Context) BTW(ir, mr operand.Op) { + if inst, err := x86.BTW(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BTW: Bit Test. +// +// Forms: +// +// BTW imm8 r16 +// BTW r16 r16 +// BTW imm8 m16 +// BTW r16 m16 +// Construct and append a BTW instruction to the active function. +// Operates on the global context. +func BTW(ir, mr operand.Op) { ctx.BTW(ir, mr) } + +// BZHIL: Zero High Bits Starting with Specified Bit Position. +// +// Forms: +// +// BZHIL r32 r32 r32 +// BZHIL r32 m32 r32 +// Construct and append a BZHIL instruction to the active function. +func (c *Context) BZHIL(r, mr, r1 operand.Op) { + if inst, err := x86.BZHIL(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BZHIL: Zero High Bits Starting with Specified Bit Position. 
+// +// Forms: +// +// BZHIL r32 r32 r32 +// BZHIL r32 m32 r32 +// Construct and append a BZHIL instruction to the active function. +// Operates on the global context. +func BZHIL(r, mr, r1 operand.Op) { ctx.BZHIL(r, mr, r1) } + +// BZHIQ: Zero High Bits Starting with Specified Bit Position. +// +// Forms: +// +// BZHIQ r64 r64 r64 +// BZHIQ r64 m64 r64 +// Construct and append a BZHIQ instruction to the active function. +func (c *Context) BZHIQ(r, mr, r1 operand.Op) { + if inst, err := x86.BZHIQ(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// BZHIQ: Zero High Bits Starting with Specified Bit Position. +// +// Forms: +// +// BZHIQ r64 r64 r64 +// BZHIQ r64 m64 r64 +// Construct and append a BZHIQ instruction to the active function. +// Operates on the global context. +func BZHIQ(r, mr, r1 operand.Op) { ctx.BZHIQ(r, mr, r1) } + +// CALL: Call Procedure. +// +// Forms: +// +// CALL rel32 +// Construct and append a CALL instruction to the active function. +func (c *Context) CALL(r operand.Op) { + if inst, err := x86.CALL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CALL: Call Procedure. +// +// Forms: +// +// CALL rel32 +// Construct and append a CALL instruction to the active function. +// Operates on the global context. +func CALL(r operand.Op) { ctx.CALL(r) } + +// CBW: Convert Byte to Word. +// +// Forms: +// +// CBW +// Construct and append a CBW instruction to the active function. +func (c *Context) CBW() { + if inst, err := x86.CBW(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CBW: Convert Byte to Word. +// +// Forms: +// +// CBW +// Construct and append a CBW instruction to the active function. +// Operates on the global context. +func CBW() { ctx.CBW() } + +// CDQ: Convert Doubleword to Quadword. +// +// Forms: +// +// CDQ +// Construct and append a CDQ instruction to the active function. +func (c *Context) CDQ() { + if inst, err := x86.CDQ(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CDQ: Convert Doubleword to Quadword. +// +// Forms: +// +// CDQ +// Construct and append a CDQ instruction to the active function. +// Operates on the global context. +func CDQ() { ctx.CDQ() } + +// CDQE: Convert Doubleword to Quadword. +// +// Forms: +// +// CDQE +// Construct and append a CDQE instruction to the active function. +func (c *Context) CDQE() { + if inst, err := x86.CDQE(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CDQE: Convert Doubleword to Quadword. +// +// Forms: +// +// CDQE +// Construct and append a CDQE instruction to the active function. +// Operates on the global context. +func CDQE() { ctx.CDQE() } + +// CLC: Clear Carry Flag. +// +// Forms: +// +// CLC +// Construct and append a CLC instruction to the active function. +func (c *Context) CLC() { + if inst, err := x86.CLC(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CLC: Clear Carry Flag. +// +// Forms: +// +// CLC +// Construct and append a CLC instruction to the active function. +// Operates on the global context. +func CLC() { ctx.CLC() } + +// CLD: Clear Direction Flag. +// +// Forms: +// +// CLD +// Construct and append a CLD instruction to the active function. +func (c *Context) CLD() { + if inst, err := x86.CLD(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CLD: Clear Direction Flag. +// +// Forms: +// +// CLD +// Construct and append a CLD instruction to the active function. 
+// Operates on the global context. +func CLD() { ctx.CLD() } + +// CLFLUSH: Flush Cache Line. +// +// Forms: +// +// CLFLUSH m8 +// Construct and append a CLFLUSH instruction to the active function. +func (c *Context) CLFLUSH(m operand.Op) { + if inst, err := x86.CLFLUSH(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CLFLUSH: Flush Cache Line. +// +// Forms: +// +// CLFLUSH m8 +// Construct and append a CLFLUSH instruction to the active function. +// Operates on the global context. +func CLFLUSH(m operand.Op) { ctx.CLFLUSH(m) } + +// CLFLUSHOPT: Flush Cache Line Optimized. +// +// Forms: +// +// CLFLUSHOPT m8 +// Construct and append a CLFLUSHOPT instruction to the active function. +func (c *Context) CLFLUSHOPT(m operand.Op) { + if inst, err := x86.CLFLUSHOPT(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CLFLUSHOPT: Flush Cache Line Optimized. +// +// Forms: +// +// CLFLUSHOPT m8 +// Construct and append a CLFLUSHOPT instruction to the active function. +// Operates on the global context. +func CLFLUSHOPT(m operand.Op) { ctx.CLFLUSHOPT(m) } + +// CMC: Complement Carry Flag. +// +// Forms: +// +// CMC +// Construct and append a CMC instruction to the active function. +func (c *Context) CMC() { + if inst, err := x86.CMC(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMC: Complement Carry Flag. +// +// Forms: +// +// CMC +// Construct and append a CMC instruction to the active function. +// Operates on the global context. +func CMC() { ctx.CMC() } + +// CMOVLCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVLCC r32 r32 +// CMOVLCC m32 r32 +// Construct and append a CMOVLCC instruction to the active function. +func (c *Context) CMOVLCC(mr, r operand.Op) { + if inst, err := x86.CMOVLCC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVLCC r32 r32 +// CMOVLCC m32 r32 +// Construct and append a CMOVLCC instruction to the active function. +// Operates on the global context. +func CMOVLCC(mr, r operand.Op) { ctx.CMOVLCC(mr, r) } + +// CMOVLCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVLCS r32 r32 +// CMOVLCS m32 r32 +// Construct and append a CMOVLCS instruction to the active function. +func (c *Context) CMOVLCS(mr, r operand.Op) { + if inst, err := x86.CMOVLCS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVLCS r32 r32 +// CMOVLCS m32 r32 +// Construct and append a CMOVLCS instruction to the active function. +// Operates on the global context. +func CMOVLCS(mr, r operand.Op) { ctx.CMOVLCS(mr, r) } + +// CMOVLEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVLEQ r32 r32 +// CMOVLEQ m32 r32 +// Construct and append a CMOVLEQ instruction to the active function. +func (c *Context) CMOVLEQ(mr, r operand.Op) { + if inst, err := x86.CMOVLEQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVLEQ r32 r32 +// CMOVLEQ m32 r32 +// Construct and append a CMOVLEQ instruction to the active function. +// Operates on the global context. +func CMOVLEQ(mr, r operand.Op) { ctx.CMOVLEQ(mr, r) } + +// CMOVLGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVLGE r32 r32 +// CMOVLGE m32 r32 +// Construct and append a CMOVLGE instruction to the active function. 
+func (c *Context) CMOVLGE(mr, r operand.Op) { + if inst, err := x86.CMOVLGE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVLGE r32 r32 +// CMOVLGE m32 r32 +// Construct and append a CMOVLGE instruction to the active function. +// Operates on the global context. +func CMOVLGE(mr, r operand.Op) { ctx.CMOVLGE(mr, r) } + +// CMOVLGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVLGT r32 r32 +// CMOVLGT m32 r32 +// Construct and append a CMOVLGT instruction to the active function. +func (c *Context) CMOVLGT(mr, r operand.Op) { + if inst, err := x86.CMOVLGT(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVLGT r32 r32 +// CMOVLGT m32 r32 +// Construct and append a CMOVLGT instruction to the active function. +// Operates on the global context. +func CMOVLGT(mr, r operand.Op) { ctx.CMOVLGT(mr, r) } + +// CMOVLHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVLHI r32 r32 +// CMOVLHI m32 r32 +// Construct and append a CMOVLHI instruction to the active function. +func (c *Context) CMOVLHI(mr, r operand.Op) { + if inst, err := x86.CMOVLHI(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVLHI r32 r32 +// CMOVLHI m32 r32 +// Construct and append a CMOVLHI instruction to the active function. +// Operates on the global context. +func CMOVLHI(mr, r operand.Op) { ctx.CMOVLHI(mr, r) } + +// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVLLE r32 r32 +// CMOVLLE m32 r32 +// Construct and append a CMOVLLE instruction to the active function. +func (c *Context) CMOVLLE(mr, r operand.Op) { + if inst, err := x86.CMOVLLE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVLLE r32 r32 +// CMOVLLE m32 r32 +// Construct and append a CMOVLLE instruction to the active function. +// Operates on the global context. +func CMOVLLE(mr, r operand.Op) { ctx.CMOVLLE(mr, r) } + +// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVLLS r32 r32 +// CMOVLLS m32 r32 +// Construct and append a CMOVLLS instruction to the active function. +func (c *Context) CMOVLLS(mr, r operand.Op) { + if inst, err := x86.CMOVLLS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVLLS r32 r32 +// CMOVLLS m32 r32 +// Construct and append a CMOVLLS instruction to the active function. +// Operates on the global context. +func CMOVLLS(mr, r operand.Op) { ctx.CMOVLLS(mr, r) } + +// CMOVLLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVLLT r32 r32 +// CMOVLLT m32 r32 +// Construct and append a CMOVLLT instruction to the active function. +func (c *Context) CMOVLLT(mr, r operand.Op) { + if inst, err := x86.CMOVLLT(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVLLT r32 r32 +// CMOVLLT m32 r32 +// Construct and append a CMOVLLT instruction to the active function. +// Operates on the global context. +func CMOVLLT(mr, r operand.Op) { ctx.CMOVLLT(mr, r) } + +// CMOVLMI: Move if sign (SF == 1). 
+// +// Forms: +// +// CMOVLMI r32 r32 +// CMOVLMI m32 r32 +// Construct and append a CMOVLMI instruction to the active function. +func (c *Context) CMOVLMI(mr, r operand.Op) { + if inst, err := x86.CMOVLMI(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVLMI r32 r32 +// CMOVLMI m32 r32 +// Construct and append a CMOVLMI instruction to the active function. +// Operates on the global context. +func CMOVLMI(mr, r operand.Op) { ctx.CMOVLMI(mr, r) } + +// CMOVLNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVLNE r32 r32 +// CMOVLNE m32 r32 +// Construct and append a CMOVLNE instruction to the active function. +func (c *Context) CMOVLNE(mr, r operand.Op) { + if inst, err := x86.CMOVLNE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVLNE r32 r32 +// CMOVLNE m32 r32 +// Construct and append a CMOVLNE instruction to the active function. +// Operates on the global context. +func CMOVLNE(mr, r operand.Op) { ctx.CMOVLNE(mr, r) } + +// CMOVLOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVLOC r32 r32 +// CMOVLOC m32 r32 +// Construct and append a CMOVLOC instruction to the active function. +func (c *Context) CMOVLOC(mr, r operand.Op) { + if inst, err := x86.CMOVLOC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVLOC r32 r32 +// CMOVLOC m32 r32 +// Construct and append a CMOVLOC instruction to the active function. +// Operates on the global context. +func CMOVLOC(mr, r operand.Op) { ctx.CMOVLOC(mr, r) } + +// CMOVLOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVLOS r32 r32 +// CMOVLOS m32 r32 +// Construct and append a CMOVLOS instruction to the active function. +func (c *Context) CMOVLOS(mr, r operand.Op) { + if inst, err := x86.CMOVLOS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVLOS r32 r32 +// CMOVLOS m32 r32 +// Construct and append a CMOVLOS instruction to the active function. +// Operates on the global context. +func CMOVLOS(mr, r operand.Op) { ctx.CMOVLOS(mr, r) } + +// CMOVLPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVLPC r32 r32 +// CMOVLPC m32 r32 +// Construct and append a CMOVLPC instruction to the active function. +func (c *Context) CMOVLPC(mr, r operand.Op) { + if inst, err := x86.CMOVLPC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVLPC r32 r32 +// CMOVLPC m32 r32 +// Construct and append a CMOVLPC instruction to the active function. +// Operates on the global context. +func CMOVLPC(mr, r operand.Op) { ctx.CMOVLPC(mr, r) } + +// CMOVLPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVLPL r32 r32 +// CMOVLPL m32 r32 +// Construct and append a CMOVLPL instruction to the active function. +func (c *Context) CMOVLPL(mr, r operand.Op) { + if inst, err := x86.CMOVLPL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVLPL r32 r32 +// CMOVLPL m32 r32 +// Construct and append a CMOVLPL instruction to the active function. +// Operates on the global context. 
+func CMOVLPL(mr, r operand.Op) { ctx.CMOVLPL(mr, r) } + +// CMOVLPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVLPS r32 r32 +// CMOVLPS m32 r32 +// Construct and append a CMOVLPS instruction to the active function. +func (c *Context) CMOVLPS(mr, r operand.Op) { + if inst, err := x86.CMOVLPS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVLPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVLPS r32 r32 +// CMOVLPS m32 r32 +// Construct and append a CMOVLPS instruction to the active function. +// Operates on the global context. +func CMOVLPS(mr, r operand.Op) { ctx.CMOVLPS(mr, r) } + +// CMOVQCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVQCC r64 r64 +// CMOVQCC m64 r64 +// Construct and append a CMOVQCC instruction to the active function. +func (c *Context) CMOVQCC(mr, r operand.Op) { + if inst, err := x86.CMOVQCC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVQCC r64 r64 +// CMOVQCC m64 r64 +// Construct and append a CMOVQCC instruction to the active function. +// Operates on the global context. +func CMOVQCC(mr, r operand.Op) { ctx.CMOVQCC(mr, r) } + +// CMOVQCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVQCS r64 r64 +// CMOVQCS m64 r64 +// Construct and append a CMOVQCS instruction to the active function. +func (c *Context) CMOVQCS(mr, r operand.Op) { + if inst, err := x86.CMOVQCS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVQCS r64 r64 +// CMOVQCS m64 r64 +// Construct and append a CMOVQCS instruction to the active function. +// Operates on the global context. +func CMOVQCS(mr, r operand.Op) { ctx.CMOVQCS(mr, r) } + +// CMOVQEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVQEQ r64 r64 +// CMOVQEQ m64 r64 +// Construct and append a CMOVQEQ instruction to the active function. +func (c *Context) CMOVQEQ(mr, r operand.Op) { + if inst, err := x86.CMOVQEQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVQEQ r64 r64 +// CMOVQEQ m64 r64 +// Construct and append a CMOVQEQ instruction to the active function. +// Operates on the global context. +func CMOVQEQ(mr, r operand.Op) { ctx.CMOVQEQ(mr, r) } + +// CMOVQGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVQGE r64 r64 +// CMOVQGE m64 r64 +// Construct and append a CMOVQGE instruction to the active function. +func (c *Context) CMOVQGE(mr, r operand.Op) { + if inst, err := x86.CMOVQGE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVQGE r64 r64 +// CMOVQGE m64 r64 +// Construct and append a CMOVQGE instruction to the active function. +// Operates on the global context. +func CMOVQGE(mr, r operand.Op) { ctx.CMOVQGE(mr, r) } + +// CMOVQGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVQGT r64 r64 +// CMOVQGT m64 r64 +// Construct and append a CMOVQGT instruction to the active function. +func (c *Context) CMOVQGT(mr, r operand.Op) { + if inst, err := x86.CMOVQGT(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQGT: Move if greater (ZF == 0 and SF == OF). 
+// +// Forms: +// +// CMOVQGT r64 r64 +// CMOVQGT m64 r64 +// Construct and append a CMOVQGT instruction to the active function. +// Operates on the global context. +func CMOVQGT(mr, r operand.Op) { ctx.CMOVQGT(mr, r) } + +// CMOVQHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVQHI r64 r64 +// CMOVQHI m64 r64 +// Construct and append a CMOVQHI instruction to the active function. +func (c *Context) CMOVQHI(mr, r operand.Op) { + if inst, err := x86.CMOVQHI(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVQHI r64 r64 +// CMOVQHI m64 r64 +// Construct and append a CMOVQHI instruction to the active function. +// Operates on the global context. +func CMOVQHI(mr, r operand.Op) { ctx.CMOVQHI(mr, r) } + +// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVQLE r64 r64 +// CMOVQLE m64 r64 +// Construct and append a CMOVQLE instruction to the active function. +func (c *Context) CMOVQLE(mr, r operand.Op) { + if inst, err := x86.CMOVQLE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVQLE r64 r64 +// CMOVQLE m64 r64 +// Construct and append a CMOVQLE instruction to the active function. +// Operates on the global context. +func CMOVQLE(mr, r operand.Op) { ctx.CMOVQLE(mr, r) } + +// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVQLS r64 r64 +// CMOVQLS m64 r64 +// Construct and append a CMOVQLS instruction to the active function. +func (c *Context) CMOVQLS(mr, r operand.Op) { + if inst, err := x86.CMOVQLS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVQLS r64 r64 +// CMOVQLS m64 r64 +// Construct and append a CMOVQLS instruction to the active function. +// Operates on the global context. +func CMOVQLS(mr, r operand.Op) { ctx.CMOVQLS(mr, r) } + +// CMOVQLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVQLT r64 r64 +// CMOVQLT m64 r64 +// Construct and append a CMOVQLT instruction to the active function. +func (c *Context) CMOVQLT(mr, r operand.Op) { + if inst, err := x86.CMOVQLT(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVQLT r64 r64 +// CMOVQLT m64 r64 +// Construct and append a CMOVQLT instruction to the active function. +// Operates on the global context. +func CMOVQLT(mr, r operand.Op) { ctx.CMOVQLT(mr, r) } + +// CMOVQMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVQMI r64 r64 +// CMOVQMI m64 r64 +// Construct and append a CMOVQMI instruction to the active function. +func (c *Context) CMOVQMI(mr, r operand.Op) { + if inst, err := x86.CMOVQMI(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVQMI r64 r64 +// CMOVQMI m64 r64 +// Construct and append a CMOVQMI instruction to the active function. +// Operates on the global context. +func CMOVQMI(mr, r operand.Op) { ctx.CMOVQMI(mr, r) } + +// CMOVQNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVQNE r64 r64 +// CMOVQNE m64 r64 +// Construct and append a CMOVQNE instruction to the active function. 
+func (c *Context) CMOVQNE(mr, r operand.Op) { + if inst, err := x86.CMOVQNE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVQNE r64 r64 +// CMOVQNE m64 r64 +// Construct and append a CMOVQNE instruction to the active function. +// Operates on the global context. +func CMOVQNE(mr, r operand.Op) { ctx.CMOVQNE(mr, r) } + +// CMOVQOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVQOC r64 r64 +// CMOVQOC m64 r64 +// Construct and append a CMOVQOC instruction to the active function. +func (c *Context) CMOVQOC(mr, r operand.Op) { + if inst, err := x86.CMOVQOC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVQOC r64 r64 +// CMOVQOC m64 r64 +// Construct and append a CMOVQOC instruction to the active function. +// Operates on the global context. +func CMOVQOC(mr, r operand.Op) { ctx.CMOVQOC(mr, r) } + +// CMOVQOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVQOS r64 r64 +// CMOVQOS m64 r64 +// Construct and append a CMOVQOS instruction to the active function. +func (c *Context) CMOVQOS(mr, r operand.Op) { + if inst, err := x86.CMOVQOS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVQOS r64 r64 +// CMOVQOS m64 r64 +// Construct and append a CMOVQOS instruction to the active function. +// Operates on the global context. +func CMOVQOS(mr, r operand.Op) { ctx.CMOVQOS(mr, r) } + +// CMOVQPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVQPC r64 r64 +// CMOVQPC m64 r64 +// Construct and append a CMOVQPC instruction to the active function. +func (c *Context) CMOVQPC(mr, r operand.Op) { + if inst, err := x86.CMOVQPC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVQPC r64 r64 +// CMOVQPC m64 r64 +// Construct and append a CMOVQPC instruction to the active function. +// Operates on the global context. +func CMOVQPC(mr, r operand.Op) { ctx.CMOVQPC(mr, r) } + +// CMOVQPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVQPL r64 r64 +// CMOVQPL m64 r64 +// Construct and append a CMOVQPL instruction to the active function. +func (c *Context) CMOVQPL(mr, r operand.Op) { + if inst, err := x86.CMOVQPL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVQPL r64 r64 +// CMOVQPL m64 r64 +// Construct and append a CMOVQPL instruction to the active function. +// Operates on the global context. +func CMOVQPL(mr, r operand.Op) { ctx.CMOVQPL(mr, r) } + +// CMOVQPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVQPS r64 r64 +// CMOVQPS m64 r64 +// Construct and append a CMOVQPS instruction to the active function. +func (c *Context) CMOVQPS(mr, r operand.Op) { + if inst, err := x86.CMOVQPS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVQPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVQPS r64 r64 +// CMOVQPS m64 r64 +// Construct and append a CMOVQPS instruction to the active function. +// Operates on the global context. +func CMOVQPS(mr, r operand.Op) { ctx.CMOVQPS(mr, r) } + +// CMOVWCC: Move if above or equal (CF == 0). 
+// +// Forms: +// +// CMOVWCC r16 r16 +// CMOVWCC m16 r16 +// Construct and append a CMOVWCC instruction to the active function. +func (c *Context) CMOVWCC(mr, r operand.Op) { + if inst, err := x86.CMOVWCC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVWCC r16 r16 +// CMOVWCC m16 r16 +// Construct and append a CMOVWCC instruction to the active function. +// Operates on the global context. +func CMOVWCC(mr, r operand.Op) { ctx.CMOVWCC(mr, r) } + +// CMOVWCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVWCS r16 r16 +// CMOVWCS m16 r16 +// Construct and append a CMOVWCS instruction to the active function. +func (c *Context) CMOVWCS(mr, r operand.Op) { + if inst, err := x86.CMOVWCS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVWCS r16 r16 +// CMOVWCS m16 r16 +// Construct and append a CMOVWCS instruction to the active function. +// Operates on the global context. +func CMOVWCS(mr, r operand.Op) { ctx.CMOVWCS(mr, r) } + +// CMOVWEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVWEQ r16 r16 +// CMOVWEQ m16 r16 +// Construct and append a CMOVWEQ instruction to the active function. +func (c *Context) CMOVWEQ(mr, r operand.Op) { + if inst, err := x86.CMOVWEQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVWEQ r16 r16 +// CMOVWEQ m16 r16 +// Construct and append a CMOVWEQ instruction to the active function. +// Operates on the global context. +func CMOVWEQ(mr, r operand.Op) { ctx.CMOVWEQ(mr, r) } + +// CMOVWGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVWGE r16 r16 +// CMOVWGE m16 r16 +// Construct and append a CMOVWGE instruction to the active function. +func (c *Context) CMOVWGE(mr, r operand.Op) { + if inst, err := x86.CMOVWGE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVWGE r16 r16 +// CMOVWGE m16 r16 +// Construct and append a CMOVWGE instruction to the active function. +// Operates on the global context. +func CMOVWGE(mr, r operand.Op) { ctx.CMOVWGE(mr, r) } + +// CMOVWGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVWGT r16 r16 +// CMOVWGT m16 r16 +// Construct and append a CMOVWGT instruction to the active function. +func (c *Context) CMOVWGT(mr, r operand.Op) { + if inst, err := x86.CMOVWGT(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVWGT r16 r16 +// CMOVWGT m16 r16 +// Construct and append a CMOVWGT instruction to the active function. +// Operates on the global context. +func CMOVWGT(mr, r operand.Op) { ctx.CMOVWGT(mr, r) } + +// CMOVWHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVWHI r16 r16 +// CMOVWHI m16 r16 +// Construct and append a CMOVWHI instruction to the active function. +func (c *Context) CMOVWHI(mr, r operand.Op) { + if inst, err := x86.CMOVWHI(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVWHI r16 r16 +// CMOVWHI m16 r16 +// Construct and append a CMOVWHI instruction to the active function. +// Operates on the global context. 
+func CMOVWHI(mr, r operand.Op) { ctx.CMOVWHI(mr, r) } + +// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVWLE r16 r16 +// CMOVWLE m16 r16 +// Construct and append a CMOVWLE instruction to the active function. +func (c *Context) CMOVWLE(mr, r operand.Op) { + if inst, err := x86.CMOVWLE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVWLE r16 r16 +// CMOVWLE m16 r16 +// Construct and append a CMOVWLE instruction to the active function. +// Operates on the global context. +func CMOVWLE(mr, r operand.Op) { ctx.CMOVWLE(mr, r) } + +// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVWLS r16 r16 +// CMOVWLS m16 r16 +// Construct and append a CMOVWLS instruction to the active function. +func (c *Context) CMOVWLS(mr, r operand.Op) { + if inst, err := x86.CMOVWLS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVWLS r16 r16 +// CMOVWLS m16 r16 +// Construct and append a CMOVWLS instruction to the active function. +// Operates on the global context. +func CMOVWLS(mr, r operand.Op) { ctx.CMOVWLS(mr, r) } + +// CMOVWLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVWLT r16 r16 +// CMOVWLT m16 r16 +// Construct and append a CMOVWLT instruction to the active function. +func (c *Context) CMOVWLT(mr, r operand.Op) { + if inst, err := x86.CMOVWLT(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVWLT r16 r16 +// CMOVWLT m16 r16 +// Construct and append a CMOVWLT instruction to the active function. +// Operates on the global context. +func CMOVWLT(mr, r operand.Op) { ctx.CMOVWLT(mr, r) } + +// CMOVWMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVWMI r16 r16 +// CMOVWMI m16 r16 +// Construct and append a CMOVWMI instruction to the active function. +func (c *Context) CMOVWMI(mr, r operand.Op) { + if inst, err := x86.CMOVWMI(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVWMI r16 r16 +// CMOVWMI m16 r16 +// Construct and append a CMOVWMI instruction to the active function. +// Operates on the global context. +func CMOVWMI(mr, r operand.Op) { ctx.CMOVWMI(mr, r) } + +// CMOVWNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVWNE r16 r16 +// CMOVWNE m16 r16 +// Construct and append a CMOVWNE instruction to the active function. +func (c *Context) CMOVWNE(mr, r operand.Op) { + if inst, err := x86.CMOVWNE(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVWNE r16 r16 +// CMOVWNE m16 r16 +// Construct and append a CMOVWNE instruction to the active function. +// Operates on the global context. +func CMOVWNE(mr, r operand.Op) { ctx.CMOVWNE(mr, r) } + +// CMOVWOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVWOC r16 r16 +// CMOVWOC m16 r16 +// Construct and append a CMOVWOC instruction to the active function. +func (c *Context) CMOVWOC(mr, r operand.Op) { + if inst, err := x86.CMOVWOC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWOC: Move if not overflow (OF == 0). 
+// +// Forms: +// +// CMOVWOC r16 r16 +// CMOVWOC m16 r16 +// Construct and append a CMOVWOC instruction to the active function. +// Operates on the global context. +func CMOVWOC(mr, r operand.Op) { ctx.CMOVWOC(mr, r) } + +// CMOVWOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVWOS r16 r16 +// CMOVWOS m16 r16 +// Construct and append a CMOVWOS instruction to the active function. +func (c *Context) CMOVWOS(mr, r operand.Op) { + if inst, err := x86.CMOVWOS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVWOS r16 r16 +// CMOVWOS m16 r16 +// Construct and append a CMOVWOS instruction to the active function. +// Operates on the global context. +func CMOVWOS(mr, r operand.Op) { ctx.CMOVWOS(mr, r) } + +// CMOVWPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVWPC r16 r16 +// CMOVWPC m16 r16 +// Construct and append a CMOVWPC instruction to the active function. +func (c *Context) CMOVWPC(mr, r operand.Op) { + if inst, err := x86.CMOVWPC(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVWPC r16 r16 +// CMOVWPC m16 r16 +// Construct and append a CMOVWPC instruction to the active function. +// Operates on the global context. +func CMOVWPC(mr, r operand.Op) { ctx.CMOVWPC(mr, r) } + +// CMOVWPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVWPL r16 r16 +// CMOVWPL m16 r16 +// Construct and append a CMOVWPL instruction to the active function. +func (c *Context) CMOVWPL(mr, r operand.Op) { + if inst, err := x86.CMOVWPL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVWPL r16 r16 +// CMOVWPL m16 r16 +// Construct and append a CMOVWPL instruction to the active function. +// Operates on the global context. +func CMOVWPL(mr, r operand.Op) { ctx.CMOVWPL(mr, r) } + +// CMOVWPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVWPS r16 r16 +// CMOVWPS m16 r16 +// Construct and append a CMOVWPS instruction to the active function. +func (c *Context) CMOVWPS(mr, r operand.Op) { + if inst, err := x86.CMOVWPS(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMOVWPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVWPS r16 r16 +// CMOVWPS m16 r16 +// Construct and append a CMOVWPS instruction to the active function. +// Operates on the global context. +func CMOVWPS(mr, r operand.Op) { ctx.CMOVWPS(mr, r) } + +// CMPB: Compare Two Operands. +// +// Forms: +// +// CMPB al imm8 +// CMPB r8 imm8 +// CMPB r8 r8 +// CMPB r8 m8 +// CMPB m8 imm8 +// CMPB m8 r8 +// Construct and append a CMPB instruction to the active function. +func (c *Context) CMPB(amr, imr operand.Op) { + if inst, err := x86.CMPB(amr, imr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPB: Compare Two Operands. +// +// Forms: +// +// CMPB al imm8 +// CMPB r8 imm8 +// CMPB r8 r8 +// CMPB r8 m8 +// CMPB m8 imm8 +// CMPB m8 r8 +// Construct and append a CMPB instruction to the active function. +// Operates on the global context. +func CMPB(amr, imr operand.Op) { ctx.CMPB(amr, imr) } + +// CMPL: Compare Two Operands. +// +// Forms: +// +// CMPL eax imm32 +// CMPL r32 imm8 +// CMPL r32 imm32 +// CMPL r32 r32 +// CMPL r32 m32 +// CMPL m32 imm8 +// CMPL m32 imm32 +// CMPL m32 r32 +// Construct and append a CMPL instruction to the active function. 
+func (c *Context) CMPL(emr, imr operand.Op) { + if inst, err := x86.CMPL(emr, imr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPL: Compare Two Operands. +// +// Forms: +// +// CMPL eax imm32 +// CMPL r32 imm8 +// CMPL r32 imm32 +// CMPL r32 r32 +// CMPL r32 m32 +// CMPL m32 imm8 +// CMPL m32 imm32 +// CMPL m32 r32 +// Construct and append a CMPL instruction to the active function. +// Operates on the global context. +func CMPL(emr, imr operand.Op) { ctx.CMPL(emr, imr) } + +// CMPPD: Compare Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// CMPPD xmm xmm imm8 +// CMPPD m128 xmm imm8 +// Construct and append a CMPPD instruction to the active function. +func (c *Context) CMPPD(mx, x, i operand.Op) { + if inst, err := x86.CMPPD(mx, x, i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPPD: Compare Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// CMPPD xmm xmm imm8 +// CMPPD m128 xmm imm8 +// Construct and append a CMPPD instruction to the active function. +// Operates on the global context. +func CMPPD(mx, x, i operand.Op) { ctx.CMPPD(mx, x, i) } + +// CMPPS: Compare Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// CMPPS xmm xmm imm8 +// CMPPS m128 xmm imm8 +// Construct and append a CMPPS instruction to the active function. +func (c *Context) CMPPS(mx, x, i operand.Op) { + if inst, err := x86.CMPPS(mx, x, i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPPS: Compare Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// CMPPS xmm xmm imm8 +// CMPPS m128 xmm imm8 +// Construct and append a CMPPS instruction to the active function. +// Operates on the global context. +func CMPPS(mx, x, i operand.Op) { ctx.CMPPS(mx, x, i) } + +// CMPQ: Compare Two Operands. +// +// Forms: +// +// CMPQ rax imm32 +// CMPQ r64 imm8 +// CMPQ r64 imm32 +// CMPQ r64 r64 +// CMPQ r64 m64 +// CMPQ m64 imm8 +// CMPQ m64 imm32 +// CMPQ m64 r64 +// Construct and append a CMPQ instruction to the active function. +func (c *Context) CMPQ(mr, imr operand.Op) { + if inst, err := x86.CMPQ(mr, imr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPQ: Compare Two Operands. +// +// Forms: +// +// CMPQ rax imm32 +// CMPQ r64 imm8 +// CMPQ r64 imm32 +// CMPQ r64 r64 +// CMPQ r64 m64 +// CMPQ m64 imm8 +// CMPQ m64 imm32 +// CMPQ m64 r64 +// Construct and append a CMPQ instruction to the active function. +// Operates on the global context. +func CMPQ(mr, imr operand.Op) { ctx.CMPQ(mr, imr) } + +// CMPSD: Compare Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// CMPSD xmm xmm imm8 +// CMPSD m64 xmm imm8 +// Construct and append a CMPSD instruction to the active function. +func (c *Context) CMPSD(mx, x, i operand.Op) { + if inst, err := x86.CMPSD(mx, x, i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPSD: Compare Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// CMPSD xmm xmm imm8 +// CMPSD m64 xmm imm8 +// Construct and append a CMPSD instruction to the active function. +// Operates on the global context. +func CMPSD(mx, x, i operand.Op) { ctx.CMPSD(mx, x, i) } + +// CMPSS: Compare Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// CMPSS xmm xmm imm8 +// CMPSS m32 xmm imm8 +// Construct and append a CMPSS instruction to the active function. 
+func (c *Context) CMPSS(mx, x, i operand.Op) { + if inst, err := x86.CMPSS(mx, x, i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPSS: Compare Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// CMPSS xmm xmm imm8 +// CMPSS m32 xmm imm8 +// Construct and append a CMPSS instruction to the active function. +// Operates on the global context. +func CMPSS(mx, x, i operand.Op) { ctx.CMPSS(mx, x, i) } + +// CMPW: Compare Two Operands. +// +// Forms: +// +// CMPW ax imm16 +// CMPW r16 imm8 +// CMPW r16 imm16 +// CMPW r16 r16 +// CMPW r16 m16 +// CMPW m16 imm8 +// CMPW m16 imm16 +// CMPW m16 r16 +// Construct and append a CMPW instruction to the active function. +func (c *Context) CMPW(amr, imr operand.Op) { + if inst, err := x86.CMPW(amr, imr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPW: Compare Two Operands. +// +// Forms: +// +// CMPW ax imm16 +// CMPW r16 imm8 +// CMPW r16 imm16 +// CMPW r16 r16 +// CMPW r16 m16 +// CMPW m16 imm8 +// CMPW m16 imm16 +// CMPW m16 r16 +// Construct and append a CMPW instruction to the active function. +// Operates on the global context. +func CMPW(amr, imr operand.Op) { ctx.CMPW(amr, imr) } + +// CMPXCHG16B: Compare and Exchange 16 Bytes. +// +// Forms: +// +// CMPXCHG16B m128 +// Construct and append a CMPXCHG16B instruction to the active function. +func (c *Context) CMPXCHG16B(m operand.Op) { + if inst, err := x86.CMPXCHG16B(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPXCHG16B: Compare and Exchange 16 Bytes. +// +// Forms: +// +// CMPXCHG16B m128 +// Construct and append a CMPXCHG16B instruction to the active function. +// Operates on the global context. +func CMPXCHG16B(m operand.Op) { ctx.CMPXCHG16B(m) } + +// CMPXCHG8B: Compare and Exchange 8 Bytes. +// +// Forms: +// +// CMPXCHG8B m64 +// Construct and append a CMPXCHG8B instruction to the active function. +func (c *Context) CMPXCHG8B(m operand.Op) { + if inst, err := x86.CMPXCHG8B(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPXCHG8B: Compare and Exchange 8 Bytes. +// +// Forms: +// +// CMPXCHG8B m64 +// Construct and append a CMPXCHG8B instruction to the active function. +// Operates on the global context. +func CMPXCHG8B(m operand.Op) { ctx.CMPXCHG8B(m) } + +// CMPXCHGB: Compare and Exchange. +// +// Forms: +// +// CMPXCHGB r8 r8 +// CMPXCHGB r8 m8 +// Construct and append a CMPXCHGB instruction to the active function. +func (c *Context) CMPXCHGB(r, mr operand.Op) { + if inst, err := x86.CMPXCHGB(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPXCHGB: Compare and Exchange. +// +// Forms: +// +// CMPXCHGB r8 r8 +// CMPXCHGB r8 m8 +// Construct and append a CMPXCHGB instruction to the active function. +// Operates on the global context. +func CMPXCHGB(r, mr operand.Op) { ctx.CMPXCHGB(r, mr) } + +// CMPXCHGL: Compare and Exchange. +// +// Forms: +// +// CMPXCHGL r32 r32 +// CMPXCHGL r32 m32 +// Construct and append a CMPXCHGL instruction to the active function. +func (c *Context) CMPXCHGL(r, mr operand.Op) { + if inst, err := x86.CMPXCHGL(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPXCHGL: Compare and Exchange. +// +// Forms: +// +// CMPXCHGL r32 r32 +// CMPXCHGL r32 m32 +// Construct and append a CMPXCHGL instruction to the active function. +// Operates on the global context. 
+func CMPXCHGL(r, mr operand.Op) { ctx.CMPXCHGL(r, mr) } + +// CMPXCHGQ: Compare and Exchange. +// +// Forms: +// +// CMPXCHGQ r64 r64 +// CMPXCHGQ r64 m64 +// Construct and append a CMPXCHGQ instruction to the active function. +func (c *Context) CMPXCHGQ(r, mr operand.Op) { + if inst, err := x86.CMPXCHGQ(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPXCHGQ: Compare and Exchange. +// +// Forms: +// +// CMPXCHGQ r64 r64 +// CMPXCHGQ r64 m64 +// Construct and append a CMPXCHGQ instruction to the active function. +// Operates on the global context. +func CMPXCHGQ(r, mr operand.Op) { ctx.CMPXCHGQ(r, mr) } + +// CMPXCHGW: Compare and Exchange. +// +// Forms: +// +// CMPXCHGW r16 r16 +// CMPXCHGW r16 m16 +// Construct and append a CMPXCHGW instruction to the active function. +func (c *Context) CMPXCHGW(r, mr operand.Op) { + if inst, err := x86.CMPXCHGW(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CMPXCHGW: Compare and Exchange. +// +// Forms: +// +// CMPXCHGW r16 r16 +// CMPXCHGW r16 m16 +// Construct and append a CMPXCHGW instruction to the active function. +// Operates on the global context. +func CMPXCHGW(r, mr operand.Op) { ctx.CMPXCHGW(r, mr) } + +// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// COMISD xmm xmm +// COMISD m64 xmm +// Construct and append a COMISD instruction to the active function. +func (c *Context) COMISD(mx, x operand.Op) { + if inst, err := x86.COMISD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// COMISD xmm xmm +// COMISD m64 xmm +// Construct and append a COMISD instruction to the active function. +// Operates on the global context. +func COMISD(mx, x operand.Op) { ctx.COMISD(mx, x) } + +// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// COMISS xmm xmm +// COMISS m32 xmm +// Construct and append a COMISS instruction to the active function. +func (c *Context) COMISS(mx, x operand.Op) { + if inst, err := x86.COMISS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// COMISS xmm xmm +// COMISS m32 xmm +// Construct and append a COMISS instruction to the active function. +// Operates on the global context. +func COMISS(mx, x operand.Op) { ctx.COMISS(mx, x) } + +// CPUID: CPU Identification. +// +// Forms: +// +// CPUID +// Construct and append a CPUID instruction to the active function. +func (c *Context) CPUID() { + if inst, err := x86.CPUID(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CPUID: CPU Identification. +// +// Forms: +// +// CPUID +// Construct and append a CPUID instruction to the active function. +// Operates on the global context. +func CPUID() { ctx.CPUID() } + +// CQO: Convert Quadword to Octaword. +// +// Forms: +// +// CQO +// Construct and append a CQO instruction to the active function. +func (c *Context) CQO() { + if inst, err := x86.CQO(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CQO: Convert Quadword to Octaword. +// +// Forms: +// +// CQO +// Construct and append a CQO instruction to the active function. +// Operates on the global context. 
+func CQO() { ctx.CQO() } + +// CRC32B: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32B r8 r32 +// CRC32B m8 r32 +// CRC32B r8 r64 +// CRC32B m8 r64 +// Construct and append a CRC32B instruction to the active function. +func (c *Context) CRC32B(mr, r operand.Op) { + if inst, err := x86.CRC32B(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CRC32B: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32B r8 r32 +// CRC32B m8 r32 +// CRC32B r8 r64 +// CRC32B m8 r64 +// Construct and append a CRC32B instruction to the active function. +// Operates on the global context. +func CRC32B(mr, r operand.Op) { ctx.CRC32B(mr, r) } + +// CRC32L: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32L r32 r32 +// CRC32L m32 r32 +// Construct and append a CRC32L instruction to the active function. +func (c *Context) CRC32L(mr, r operand.Op) { + if inst, err := x86.CRC32L(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CRC32L: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32L r32 r32 +// CRC32L m32 r32 +// Construct and append a CRC32L instruction to the active function. +// Operates on the global context. +func CRC32L(mr, r operand.Op) { ctx.CRC32L(mr, r) } + +// CRC32Q: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32Q r64 r64 +// CRC32Q m64 r64 +// Construct and append a CRC32Q instruction to the active function. +func (c *Context) CRC32Q(mr, r operand.Op) { + if inst, err := x86.CRC32Q(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CRC32Q: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32Q r64 r64 +// CRC32Q m64 r64 +// Construct and append a CRC32Q instruction to the active function. +// Operates on the global context. +func CRC32Q(mr, r operand.Op) { ctx.CRC32Q(mr, r) } + +// CRC32W: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32W r16 r32 +// CRC32W m16 r32 +// Construct and append a CRC32W instruction to the active function. +func (c *Context) CRC32W(mr, r operand.Op) { + if inst, err := x86.CRC32W(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CRC32W: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32W r16 r32 +// CRC32W m16 r32 +// Construct and append a CRC32W instruction to the active function. +// Operates on the global context. +func CRC32W(mr, r operand.Op) { ctx.CRC32W(mr, r) } + +// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTPD2PL xmm xmm +// CVTPD2PL m128 xmm +// Construct and append a CVTPD2PL instruction to the active function. +func (c *Context) CVTPD2PL(mx, x operand.Op) { + if inst, err := x86.CVTPD2PL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTPD2PL xmm xmm +// CVTPD2PL m128 xmm +// Construct and append a CVTPD2PL instruction to the active function. +// Operates on the global context. +func CVTPD2PL(mx, x operand.Op) { ctx.CVTPD2PL(mx, x) } + +// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// CVTPD2PS xmm xmm +// CVTPD2PS m128 xmm +// Construct and append a CVTPD2PS instruction to the active function. 
+func (c *Context) CVTPD2PS(mx, x operand.Op) { + if inst, err := x86.CVTPD2PS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// CVTPD2PS xmm xmm +// CVTPD2PS m128 xmm +// Construct and append a CVTPD2PS instruction to the active function. +// Operates on the global context. +func CVTPD2PS(mx, x operand.Op) { ctx.CVTPD2PS(mx, x) } + +// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values. +// +// Forms: +// +// CVTPL2PD xmm xmm +// CVTPL2PD m64 xmm +// Construct and append a CVTPL2PD instruction to the active function. +func (c *Context) CVTPL2PD(mx, x operand.Op) { + if inst, err := x86.CVTPL2PD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values. +// +// Forms: +// +// CVTPL2PD xmm xmm +// CVTPL2PD m64 xmm +// Construct and append a CVTPL2PD instruction to the active function. +// Operates on the global context. +func CVTPL2PD(mx, x operand.Op) { ctx.CVTPL2PD(mx, x) } + +// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values. +// +// Forms: +// +// CVTPL2PS xmm xmm +// CVTPL2PS m128 xmm +// Construct and append a CVTPL2PS instruction to the active function. +func (c *Context) CVTPL2PS(mx, x operand.Op) { + if inst, err := x86.CVTPL2PS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values. +// +// Forms: +// +// CVTPL2PS xmm xmm +// CVTPL2PS m128 xmm +// Construct and append a CVTPL2PS instruction to the active function. +// Operates on the global context. +func CVTPL2PS(mx, x operand.Op) { ctx.CVTPL2PS(mx, x) } + +// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values. +// +// Forms: +// +// CVTPS2PD xmm xmm +// CVTPS2PD m64 xmm +// Construct and append a CVTPS2PD instruction to the active function. +func (c *Context) CVTPS2PD(mx, x operand.Op) { + if inst, err := x86.CVTPS2PD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values. +// +// Forms: +// +// CVTPS2PD xmm xmm +// CVTPS2PD m64 xmm +// Construct and append a CVTPS2PD instruction to the active function. +// Operates on the global context. +func CVTPS2PD(mx, x operand.Op) { ctx.CVTPS2PD(mx, x) } + +// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTPS2PL xmm xmm +// CVTPS2PL m128 xmm +// Construct and append a CVTPS2PL instruction to the active function. +func (c *Context) CVTPS2PL(mx, x operand.Op) { + if inst, err := x86.CVTPS2PL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTPS2PL xmm xmm +// CVTPS2PL m128 xmm +// Construct and append a CVTPS2PL instruction to the active function. +// Operates on the global context. +func CVTPS2PL(mx, x operand.Op) { ctx.CVTPS2PL(mx, x) } + +// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// CVTSD2SL xmm r32 +// CVTSD2SL m64 r32 +// CVTSD2SL xmm r64 +// CVTSD2SL m64 r64 +// Construct and append a CVTSD2SL instruction to the active function. 
+func (c *Context) CVTSD2SL(mx, r operand.Op) { + if inst, err := x86.CVTSD2SL(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// CVTSD2SL xmm r32 +// CVTSD2SL m64 r32 +// CVTSD2SL xmm r64 +// CVTSD2SL m64 r64 +// Construct and append a CVTSD2SL instruction to the active function. +// Operates on the global context. +func CVTSD2SL(mx, r operand.Op) { ctx.CVTSD2SL(mx, r) } + +// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSD2SS xmm xmm +// CVTSD2SS m64 xmm +// Construct and append a CVTSD2SS instruction to the active function. +func (c *Context) CVTSD2SS(mx, x operand.Op) { + if inst, err := x86.CVTSD2SS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSD2SS xmm xmm +// CVTSD2SS m64 xmm +// Construct and append a CVTSD2SS instruction to the active function. +// Operates on the global context. +func CVTSD2SS(mx, x operand.Op) { ctx.CVTSD2SS(mx, x) } + +// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSL2SD r32 xmm +// CVTSL2SD m32 xmm +// Construct and append a CVTSL2SD instruction to the active function. +func (c *Context) CVTSL2SD(mr, x operand.Op) { + if inst, err := x86.CVTSL2SD(mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSL2SD r32 xmm +// CVTSL2SD m32 xmm +// Construct and append a CVTSL2SD instruction to the active function. +// Operates on the global context. +func CVTSL2SD(mr, x operand.Op) { ctx.CVTSL2SD(mr, x) } + +// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSL2SS r32 xmm +// CVTSL2SS m32 xmm +// Construct and append a CVTSL2SS instruction to the active function. +func (c *Context) CVTSL2SS(mr, x operand.Op) { + if inst, err := x86.CVTSL2SS(mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSL2SS r32 xmm +// CVTSL2SS m32 xmm +// Construct and append a CVTSL2SS instruction to the active function. +// Operates on the global context. +func CVTSL2SS(mr, x operand.Op) { ctx.CVTSL2SS(mr, x) } + +// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSQ2SD r64 xmm +// CVTSQ2SD m64 xmm +// Construct and append a CVTSQ2SD instruction to the active function. +func (c *Context) CVTSQ2SD(mr, x operand.Op) { + if inst, err := x86.CVTSQ2SD(mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSQ2SD r64 xmm +// CVTSQ2SD m64 xmm +// Construct and append a CVTSQ2SD instruction to the active function. +// Operates on the global context. +func CVTSQ2SD(mr, x operand.Op) { ctx.CVTSQ2SD(mr, x) } + +// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSQ2SS r64 xmm +// CVTSQ2SS m64 xmm +// Construct and append a CVTSQ2SS instruction to the active function. 
+func (c *Context) CVTSQ2SS(mr, x operand.Op) { + if inst, err := x86.CVTSQ2SS(mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSQ2SS r64 xmm +// CVTSQ2SS m64 xmm +// Construct and append a CVTSQ2SS instruction to the active function. +// Operates on the global context. +func CVTSQ2SS(mr, x operand.Op) { ctx.CVTSQ2SS(mr, x) } + +// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSS2SD xmm xmm +// CVTSS2SD m32 xmm +// Construct and append a CVTSS2SD instruction to the active function. +func (c *Context) CVTSS2SD(mx, x operand.Op) { + if inst, err := x86.CVTSS2SD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSS2SD xmm xmm +// CVTSS2SD m32 xmm +// Construct and append a CVTSS2SD instruction to the active function. +// Operates on the global context. +func CVTSS2SD(mx, x operand.Op) { ctx.CVTSS2SD(mx, x) } + +// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// CVTSS2SL xmm r32 +// CVTSS2SL m32 r32 +// CVTSS2SL xmm r64 +// CVTSS2SL m32 r64 +// Construct and append a CVTSS2SL instruction to the active function. +func (c *Context) CVTSS2SL(mx, r operand.Op) { + if inst, err := x86.CVTSS2SL(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// CVTSS2SL xmm r32 +// CVTSS2SL m32 r32 +// CVTSS2SL xmm r64 +// CVTSS2SL m32 r64 +// Construct and append a CVTSS2SL instruction to the active function. +// Operates on the global context. +func CVTSS2SL(mx, r operand.Op) { ctx.CVTSS2SL(mx, r) } + +// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTTPD2PL xmm xmm +// CVTTPD2PL m128 xmm +// Construct and append a CVTTPD2PL instruction to the active function. +func (c *Context) CVTTPD2PL(mx, x operand.Op) { + if inst, err := x86.CVTTPD2PL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTTPD2PL xmm xmm +// CVTTPD2PL m128 xmm +// Construct and append a CVTTPD2PL instruction to the active function. +// Operates on the global context. +func CVTTPD2PL(mx, x operand.Op) { ctx.CVTTPD2PL(mx, x) } + +// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTTPS2PL xmm xmm +// CVTTPS2PL m128 xmm +// Construct and append a CVTTPS2PL instruction to the active function. +func (c *Context) CVTTPS2PL(mx, x operand.Op) { + if inst, err := x86.CVTTPS2PL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTTPS2PL xmm xmm +// CVTTPS2PL m128 xmm +// Construct and append a CVTTPS2PL instruction to the active function. +// Operates on the global context. +func CVTTPS2PL(mx, x operand.Op) { ctx.CVTTPS2PL(mx, x) } + +// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. 
+// +// Forms: +// +// CVTTSD2SL xmm r32 +// CVTTSD2SL m64 r32 +// Construct and append a CVTTSD2SL instruction to the active function. +func (c *Context) CVTTSD2SL(mx, r operand.Op) { + if inst, err := x86.CVTTSD2SL(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// CVTTSD2SL xmm r32 +// CVTTSD2SL m64 r32 +// Construct and append a CVTTSD2SL instruction to the active function. +// Operates on the global context. +func CVTTSD2SL(mx, r operand.Op) { ctx.CVTTSD2SL(mx, r) } + +// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// CVTTSD2SQ xmm r64 +// CVTTSD2SQ m64 r64 +// Construct and append a CVTTSD2SQ instruction to the active function. +func (c *Context) CVTTSD2SQ(mx, r operand.Op) { + if inst, err := x86.CVTTSD2SQ(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// CVTTSD2SQ xmm r64 +// CVTTSD2SQ m64 r64 +// Construct and append a CVTTSD2SQ instruction to the active function. +// Operates on the global context. +func CVTTSD2SQ(mx, r operand.Op) { ctx.CVTTSD2SQ(mx, r) } + +// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// CVTTSS2SL xmm r32 +// CVTTSS2SL m32 r32 +// CVTTSS2SL xmm r64 +// CVTTSS2SL m32 r64 +// Construct and append a CVTTSS2SL instruction to the active function. +func (c *Context) CVTTSS2SL(mx, r operand.Op) { + if inst, err := x86.CVTTSS2SL(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// CVTTSS2SL xmm r32 +// CVTTSS2SL m32 r32 +// CVTTSS2SL xmm r64 +// CVTTSS2SL m32 r64 +// Construct and append a CVTTSS2SL instruction to the active function. +// Operates on the global context. +func CVTTSS2SL(mx, r operand.Op) { ctx.CVTTSS2SL(mx, r) } + +// CWD: Convert Word to Doubleword. +// +// Forms: +// +// CWD +// Construct and append a CWD instruction to the active function. +func (c *Context) CWD() { + if inst, err := x86.CWD(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CWD: Convert Word to Doubleword. +// +// Forms: +// +// CWD +// Construct and append a CWD instruction to the active function. +// Operates on the global context. +func CWD() { ctx.CWD() } + +// CWDE: Convert Word to Doubleword. +// +// Forms: +// +// CWDE +// Construct and append a CWDE instruction to the active function. +func (c *Context) CWDE() { + if inst, err := x86.CWDE(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// CWDE: Convert Word to Doubleword. +// +// Forms: +// +// CWDE +// Construct and append a CWDE instruction to the active function. +// Operates on the global context. +func CWDE() { ctx.CWDE() } + +// DECB: Decrement by 1. +// +// Forms: +// +// DECB r8 +// DECB m8 +// Construct and append a DECB instruction to the active function. +func (c *Context) DECB(mr operand.Op) { + if inst, err := x86.DECB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DECB: Decrement by 1. +// +// Forms: +// +// DECB r8 +// DECB m8 +// Construct and append a DECB instruction to the active function. +// Operates on the global context. 
+func DECB(mr operand.Op) { ctx.DECB(mr) } + +// DECL: Decrement by 1. +// +// Forms: +// +// DECL r32 +// DECL m32 +// Construct and append a DECL instruction to the active function. +func (c *Context) DECL(mr operand.Op) { + if inst, err := x86.DECL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DECL: Decrement by 1. +// +// Forms: +// +// DECL r32 +// DECL m32 +// Construct and append a DECL instruction to the active function. +// Operates on the global context. +func DECL(mr operand.Op) { ctx.DECL(mr) } + +// DECQ: Decrement by 1. +// +// Forms: +// +// DECQ r64 +// DECQ m64 +// Construct and append a DECQ instruction to the active function. +func (c *Context) DECQ(mr operand.Op) { + if inst, err := x86.DECQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DECQ: Decrement by 1. +// +// Forms: +// +// DECQ r64 +// DECQ m64 +// Construct and append a DECQ instruction to the active function. +// Operates on the global context. +func DECQ(mr operand.Op) { ctx.DECQ(mr) } + +// DECW: Decrement by 1. +// +// Forms: +// +// DECW r16 +// DECW m16 +// Construct and append a DECW instruction to the active function. +func (c *Context) DECW(mr operand.Op) { + if inst, err := x86.DECW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DECW: Decrement by 1. +// +// Forms: +// +// DECW r16 +// DECW m16 +// Construct and append a DECW instruction to the active function. +// Operates on the global context. +func DECW(mr operand.Op) { ctx.DECW(mr) } + +// DIVB: Unsigned Divide. +// +// Forms: +// +// DIVB r8 +// DIVB m8 +// Construct and append a DIVB instruction to the active function. +func (c *Context) DIVB(mr operand.Op) { + if inst, err := x86.DIVB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVB: Unsigned Divide. +// +// Forms: +// +// DIVB r8 +// DIVB m8 +// Construct and append a DIVB instruction to the active function. +// Operates on the global context. +func DIVB(mr operand.Op) { ctx.DIVB(mr) } + +// DIVL: Unsigned Divide. +// +// Forms: +// +// DIVL r32 +// DIVL m32 +// Construct and append a DIVL instruction to the active function. +func (c *Context) DIVL(mr operand.Op) { + if inst, err := x86.DIVL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVL: Unsigned Divide. +// +// Forms: +// +// DIVL r32 +// DIVL m32 +// Construct and append a DIVL instruction to the active function. +// Operates on the global context. +func DIVL(mr operand.Op) { ctx.DIVL(mr) } + +// DIVPD: Divide Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// DIVPD xmm xmm +// DIVPD m128 xmm +// Construct and append a DIVPD instruction to the active function. +func (c *Context) DIVPD(mx, x operand.Op) { + if inst, err := x86.DIVPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVPD: Divide Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// DIVPD xmm xmm +// DIVPD m128 xmm +// Construct and append a DIVPD instruction to the active function. +// Operates on the global context. +func DIVPD(mx, x operand.Op) { ctx.DIVPD(mx, x) } + +// DIVPS: Divide Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// DIVPS xmm xmm +// DIVPS m128 xmm +// Construct and append a DIVPS instruction to the active function. 
+func (c *Context) DIVPS(mx, x operand.Op) { + if inst, err := x86.DIVPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVPS: Divide Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// DIVPS xmm xmm +// DIVPS m128 xmm +// Construct and append a DIVPS instruction to the active function. +// Operates on the global context. +func DIVPS(mx, x operand.Op) { ctx.DIVPS(mx, x) } + +// DIVQ: Unsigned Divide. +// +// Forms: +// +// DIVQ r64 +// DIVQ m64 +// Construct and append a DIVQ instruction to the active function. +func (c *Context) DIVQ(mr operand.Op) { + if inst, err := x86.DIVQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVQ: Unsigned Divide. +// +// Forms: +// +// DIVQ r64 +// DIVQ m64 +// Construct and append a DIVQ instruction to the active function. +// Operates on the global context. +func DIVQ(mr operand.Op) { ctx.DIVQ(mr) } + +// DIVSD: Divide Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// DIVSD xmm xmm +// DIVSD m64 xmm +// Construct and append a DIVSD instruction to the active function. +func (c *Context) DIVSD(mx, x operand.Op) { + if inst, err := x86.DIVSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVSD: Divide Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// DIVSD xmm xmm +// DIVSD m64 xmm +// Construct and append a DIVSD instruction to the active function. +// Operates on the global context. +func DIVSD(mx, x operand.Op) { ctx.DIVSD(mx, x) } + +// DIVSS: Divide Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// DIVSS xmm xmm +// DIVSS m32 xmm +// Construct and append a DIVSS instruction to the active function. +func (c *Context) DIVSS(mx, x operand.Op) { + if inst, err := x86.DIVSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVSS: Divide Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// DIVSS xmm xmm +// DIVSS m32 xmm +// Construct and append a DIVSS instruction to the active function. +// Operates on the global context. +func DIVSS(mx, x operand.Op) { ctx.DIVSS(mx, x) } + +// DIVW: Unsigned Divide. +// +// Forms: +// +// DIVW r16 +// DIVW m16 +// Construct and append a DIVW instruction to the active function. +func (c *Context) DIVW(mr operand.Op) { + if inst, err := x86.DIVW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DIVW: Unsigned Divide. +// +// Forms: +// +// DIVW r16 +// DIVW m16 +// Construct and append a DIVW instruction to the active function. +// Operates on the global context. +func DIVW(mr operand.Op) { ctx.DIVW(mr) } + +// DPPD: Dot Product of Packed Double Precision Floating-Point Values. +// +// Forms: +// +// DPPD imm8 xmm xmm +// DPPD imm8 m128 xmm +// Construct and append a DPPD instruction to the active function. +func (c *Context) DPPD(i, mx, x operand.Op) { + if inst, err := x86.DPPD(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DPPD: Dot Product of Packed Double Precision Floating-Point Values. +// +// Forms: +// +// DPPD imm8 xmm xmm +// DPPD imm8 m128 xmm +// Construct and append a DPPD instruction to the active function. +// Operates on the global context. +func DPPD(i, mx, x operand.Op) { ctx.DPPD(i, mx, x) } + +// DPPS: Dot Product of Packed Single Precision Floating-Point Values. +// +// Forms: +// +// DPPS imm8 xmm xmm +// DPPS imm8 m128 xmm +// Construct and append a DPPS instruction to the active function. 
+func (c *Context) DPPS(i, mx, x operand.Op) { + if inst, err := x86.DPPS(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// DPPS: Dot Product of Packed Single Precision Floating-Point Values. +// +// Forms: +// +// DPPS imm8 xmm xmm +// DPPS imm8 m128 xmm +// Construct and append a DPPS instruction to the active function. +// Operates on the global context. +func DPPS(i, mx, x operand.Op) { ctx.DPPS(i, mx, x) } + +// EXTRACTPS: Extract Packed Single Precision Floating-Point Value. +// +// Forms: +// +// EXTRACTPS imm2u xmm r32 +// EXTRACTPS imm2u xmm m32 +// Construct and append a EXTRACTPS instruction to the active function. +func (c *Context) EXTRACTPS(i, x, mr operand.Op) { + if inst, err := x86.EXTRACTPS(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// EXTRACTPS: Extract Packed Single Precision Floating-Point Value. +// +// Forms: +// +// EXTRACTPS imm2u xmm r32 +// EXTRACTPS imm2u xmm m32 +// Construct and append a EXTRACTPS instruction to the active function. +// Operates on the global context. +func EXTRACTPS(i, x, mr operand.Op) { ctx.EXTRACTPS(i, x, mr) } + +// HADDPD: Packed Double-FP Horizontal Add. +// +// Forms: +// +// HADDPD xmm xmm +// HADDPD m128 xmm +// Construct and append a HADDPD instruction to the active function. +func (c *Context) HADDPD(mx, x operand.Op) { + if inst, err := x86.HADDPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// HADDPD: Packed Double-FP Horizontal Add. +// +// Forms: +// +// HADDPD xmm xmm +// HADDPD m128 xmm +// Construct and append a HADDPD instruction to the active function. +// Operates on the global context. +func HADDPD(mx, x operand.Op) { ctx.HADDPD(mx, x) } + +// HADDPS: Packed Single-FP Horizontal Add. +// +// Forms: +// +// HADDPS xmm xmm +// HADDPS m128 xmm +// Construct and append a HADDPS instruction to the active function. +func (c *Context) HADDPS(mx, x operand.Op) { + if inst, err := x86.HADDPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// HADDPS: Packed Single-FP Horizontal Add. +// +// Forms: +// +// HADDPS xmm xmm +// HADDPS m128 xmm +// Construct and append a HADDPS instruction to the active function. +// Operates on the global context. +func HADDPS(mx, x operand.Op) { ctx.HADDPS(mx, x) } + +// HSUBPD: Packed Double-FP Horizontal Subtract. +// +// Forms: +// +// HSUBPD xmm xmm +// HSUBPD m128 xmm +// Construct and append a HSUBPD instruction to the active function. +func (c *Context) HSUBPD(mx, x operand.Op) { + if inst, err := x86.HSUBPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// HSUBPD: Packed Double-FP Horizontal Subtract. +// +// Forms: +// +// HSUBPD xmm xmm +// HSUBPD m128 xmm +// Construct and append a HSUBPD instruction to the active function. +// Operates on the global context. +func HSUBPD(mx, x operand.Op) { ctx.HSUBPD(mx, x) } + +// HSUBPS: Packed Single-FP Horizontal Subtract. +// +// Forms: +// +// HSUBPS xmm xmm +// HSUBPS m128 xmm +// Construct and append a HSUBPS instruction to the active function. +func (c *Context) HSUBPS(mx, x operand.Op) { + if inst, err := x86.HSUBPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// HSUBPS: Packed Single-FP Horizontal Subtract. +// +// Forms: +// +// HSUBPS xmm xmm +// HSUBPS m128 xmm +// Construct and append a HSUBPS instruction to the active function. +// Operates on the global context. 
+func HSUBPS(mx, x operand.Op) { ctx.HSUBPS(mx, x) } + +// IDIVB: Signed Divide. +// +// Forms: +// +// IDIVB r8 +// IDIVB m8 +// Construct and append a IDIVB instruction to the active function. +func (c *Context) IDIVB(mr operand.Op) { + if inst, err := x86.IDIVB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IDIVB: Signed Divide. +// +// Forms: +// +// IDIVB r8 +// IDIVB m8 +// Construct and append a IDIVB instruction to the active function. +// Operates on the global context. +func IDIVB(mr operand.Op) { ctx.IDIVB(mr) } + +// IDIVL: Signed Divide. +// +// Forms: +// +// IDIVL r32 +// IDIVL m32 +// Construct and append a IDIVL instruction to the active function. +func (c *Context) IDIVL(mr operand.Op) { + if inst, err := x86.IDIVL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IDIVL: Signed Divide. +// +// Forms: +// +// IDIVL r32 +// IDIVL m32 +// Construct and append a IDIVL instruction to the active function. +// Operates on the global context. +func IDIVL(mr operand.Op) { ctx.IDIVL(mr) } + +// IDIVQ: Signed Divide. +// +// Forms: +// +// IDIVQ r64 +// IDIVQ m64 +// Construct and append a IDIVQ instruction to the active function. +func (c *Context) IDIVQ(mr operand.Op) { + if inst, err := x86.IDIVQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IDIVQ: Signed Divide. +// +// Forms: +// +// IDIVQ r64 +// IDIVQ m64 +// Construct and append a IDIVQ instruction to the active function. +// Operates on the global context. +func IDIVQ(mr operand.Op) { ctx.IDIVQ(mr) } + +// IDIVW: Signed Divide. +// +// Forms: +// +// IDIVW r16 +// IDIVW m16 +// Construct and append a IDIVW instruction to the active function. +func (c *Context) IDIVW(mr operand.Op) { + if inst, err := x86.IDIVW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IDIVW: Signed Divide. +// +// Forms: +// +// IDIVW r16 +// IDIVW m16 +// Construct and append a IDIVW instruction to the active function. +// Operates on the global context. +func IDIVW(mr operand.Op) { ctx.IDIVW(mr) } + +// IMUL3L: Signed Multiply. +// +// Forms: +// +// IMUL3L imm8 r32 r32 +// IMUL3L imm32 r32 r32 +// IMUL3L imm8 m32 r32 +// IMUL3L imm32 m32 r32 +// Construct and append a IMUL3L instruction to the active function. +func (c *Context) IMUL3L(i, mr, r operand.Op) { + if inst, err := x86.IMUL3L(i, mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMUL3L: Signed Multiply. +// +// Forms: +// +// IMUL3L imm8 r32 r32 +// IMUL3L imm32 r32 r32 +// IMUL3L imm8 m32 r32 +// IMUL3L imm32 m32 r32 +// Construct and append a IMUL3L instruction to the active function. +// Operates on the global context. +func IMUL3L(i, mr, r operand.Op) { ctx.IMUL3L(i, mr, r) } + +// IMUL3Q: Signed Multiply. +// +// Forms: +// +// IMUL3Q imm8 r64 r64 +// IMUL3Q imm32 r64 r64 +// IMUL3Q imm8 m64 r64 +// IMUL3Q imm32 m64 r64 +// Construct and append a IMUL3Q instruction to the active function. +func (c *Context) IMUL3Q(i, mr, r operand.Op) { + if inst, err := x86.IMUL3Q(i, mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMUL3Q: Signed Multiply. +// +// Forms: +// +// IMUL3Q imm8 r64 r64 +// IMUL3Q imm32 r64 r64 +// IMUL3Q imm8 m64 r64 +// IMUL3Q imm32 m64 r64 +// Construct and append a IMUL3Q instruction to the active function. +// Operates on the global context. +func IMUL3Q(i, mr, r operand.Op) { ctx.IMUL3Q(i, mr, r) } + +// IMUL3W: Signed Multiply. 
+// +// Forms: +// +// IMUL3W imm8 r16 r16 +// IMUL3W imm16 r16 r16 +// IMUL3W imm8 m16 r16 +// IMUL3W imm16 m16 r16 +// Construct and append a IMUL3W instruction to the active function. +func (c *Context) IMUL3W(i, mr, r operand.Op) { + if inst, err := x86.IMUL3W(i, mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMUL3W: Signed Multiply. +// +// Forms: +// +// IMUL3W imm8 r16 r16 +// IMUL3W imm16 r16 r16 +// IMUL3W imm8 m16 r16 +// IMUL3W imm16 m16 r16 +// Construct and append a IMUL3W instruction to the active function. +// Operates on the global context. +func IMUL3W(i, mr, r operand.Op) { ctx.IMUL3W(i, mr, r) } + +// IMULB: Signed Multiply. +// +// Forms: +// +// IMULB r8 +// IMULB m8 +// Construct and append a IMULB instruction to the active function. +func (c *Context) IMULB(mr operand.Op) { + if inst, err := x86.IMULB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMULB: Signed Multiply. +// +// Forms: +// +// IMULB r8 +// IMULB m8 +// Construct and append a IMULB instruction to the active function. +// Operates on the global context. +func IMULB(mr operand.Op) { ctx.IMULB(mr) } + +// IMULL: Signed Multiply. +// +// Forms: +// +// IMULL r32 +// IMULL m32 +// IMULL r32 r32 +// IMULL m32 r32 +// Construct and append a IMULL instruction to the active function. +func (c *Context) IMULL(ops ...operand.Op) { + if inst, err := x86.IMULL(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMULL: Signed Multiply. +// +// Forms: +// +// IMULL r32 +// IMULL m32 +// IMULL r32 r32 +// IMULL m32 r32 +// Construct and append a IMULL instruction to the active function. +// Operates on the global context. +func IMULL(ops ...operand.Op) { ctx.IMULL(ops...) } + +// IMULQ: Signed Multiply. +// +// Forms: +// +// IMULQ r64 +// IMULQ m64 +// IMULQ r64 r64 +// IMULQ m64 r64 +// Construct and append a IMULQ instruction to the active function. +func (c *Context) IMULQ(ops ...operand.Op) { + if inst, err := x86.IMULQ(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMULQ: Signed Multiply. +// +// Forms: +// +// IMULQ r64 +// IMULQ m64 +// IMULQ r64 r64 +// IMULQ m64 r64 +// Construct and append a IMULQ instruction to the active function. +// Operates on the global context. +func IMULQ(ops ...operand.Op) { ctx.IMULQ(ops...) } + +// IMULW: Signed Multiply. +// +// Forms: +// +// IMULW r16 +// IMULW m16 +// IMULW r16 r16 +// IMULW m16 r16 +// Construct and append a IMULW instruction to the active function. +func (c *Context) IMULW(ops ...operand.Op) { + if inst, err := x86.IMULW(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// IMULW: Signed Multiply. +// +// Forms: +// +// IMULW r16 +// IMULW m16 +// IMULW r16 r16 +// IMULW m16 r16 +// Construct and append a IMULW instruction to the active function. +// Operates on the global context. +func IMULW(ops ...operand.Op) { ctx.IMULW(ops...) } + +// INCB: Increment by 1. +// +// Forms: +// +// INCB r8 +// INCB m8 +// Construct and append a INCB instruction to the active function. +func (c *Context) INCB(mr operand.Op) { + if inst, err := x86.INCB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// INCB: Increment by 1. +// +// Forms: +// +// INCB r8 +// INCB m8 +// Construct and append a INCB instruction to the active function. +// Operates on the global context. +func INCB(mr operand.Op) { ctx.INCB(mr) } + +// INCL: Increment by 1. 
+// +// Forms: +// +// INCL r32 +// INCL m32 +// Construct and append a INCL instruction to the active function. +func (c *Context) INCL(mr operand.Op) { + if inst, err := x86.INCL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// INCL: Increment by 1. +// +// Forms: +// +// INCL r32 +// INCL m32 +// Construct and append a INCL instruction to the active function. +// Operates on the global context. +func INCL(mr operand.Op) { ctx.INCL(mr) } + +// INCQ: Increment by 1. +// +// Forms: +// +// INCQ r64 +// INCQ m64 +// Construct and append a INCQ instruction to the active function. +func (c *Context) INCQ(mr operand.Op) { + if inst, err := x86.INCQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// INCQ: Increment by 1. +// +// Forms: +// +// INCQ r64 +// INCQ m64 +// Construct and append a INCQ instruction to the active function. +// Operates on the global context. +func INCQ(mr operand.Op) { ctx.INCQ(mr) } + +// INCW: Increment by 1. +// +// Forms: +// +// INCW r16 +// INCW m16 +// Construct and append a INCW instruction to the active function. +func (c *Context) INCW(mr operand.Op) { + if inst, err := x86.INCW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// INCW: Increment by 1. +// +// Forms: +// +// INCW r16 +// INCW m16 +// Construct and append a INCW instruction to the active function. +// Operates on the global context. +func INCW(mr operand.Op) { ctx.INCW(mr) } + +// INSERTPS: Insert Packed Single Precision Floating-Point Value. +// +// Forms: +// +// INSERTPS imm8 xmm xmm +// INSERTPS imm8 m32 xmm +// Construct and append a INSERTPS instruction to the active function. +func (c *Context) INSERTPS(i, mx, x operand.Op) { + if inst, err := x86.INSERTPS(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// INSERTPS: Insert Packed Single Precision Floating-Point Value. +// +// Forms: +// +// INSERTPS imm8 xmm xmm +// INSERTPS imm8 m32 xmm +// Construct and append a INSERTPS instruction to the active function. +// Operates on the global context. +func INSERTPS(i, mx, x operand.Op) { ctx.INSERTPS(i, mx, x) } + +// INT: Call to Interrupt Procedure. +// +// Forms: +// +// INT 3 +// INT imm8 +// Construct and append a INT instruction to the active function. +func (c *Context) INT(i operand.Op) { + if inst, err := x86.INT(i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// INT: Call to Interrupt Procedure. +// +// Forms: +// +// INT 3 +// INT imm8 +// Construct and append a INT instruction to the active function. +// Operates on the global context. +func INT(i operand.Op) { ctx.INT(i) } + +// JA: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JA rel8 +// JA rel32 +// Construct and append a JA instruction to the active function. +func (c *Context) JA(r operand.Op) { + if inst, err := x86.JA(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JA: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JA rel8 +// JA rel32 +// Construct and append a JA instruction to the active function. +// Operates on the global context. +func JA(r operand.Op) { ctx.JA(r) } + +// JAE: Jump if above or equal (CF == 0). +// +// Forms: +// +// JAE rel8 +// JAE rel32 +// Construct and append a JAE instruction to the active function. +func (c *Context) JAE(r operand.Op) { + if inst, err := x86.JAE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JAE: Jump if above or equal (CF == 0). 
+// +// Forms: +// +// JAE rel8 +// JAE rel32 +// Construct and append a JAE instruction to the active function. +// Operates on the global context. +func JAE(r operand.Op) { ctx.JAE(r) } + +// JB: Jump if below (CF == 1). +// +// Forms: +// +// JB rel8 +// JB rel32 +// Construct and append a JB instruction to the active function. +func (c *Context) JB(r operand.Op) { + if inst, err := x86.JB(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JB: Jump if below (CF == 1). +// +// Forms: +// +// JB rel8 +// JB rel32 +// Construct and append a JB instruction to the active function. +// Operates on the global context. +func JB(r operand.Op) { ctx.JB(r) } + +// JBE: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JBE rel8 +// JBE rel32 +// Construct and append a JBE instruction to the active function. +func (c *Context) JBE(r operand.Op) { + if inst, err := x86.JBE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JBE: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JBE rel8 +// JBE rel32 +// Construct and append a JBE instruction to the active function. +// Operates on the global context. +func JBE(r operand.Op) { ctx.JBE(r) } + +// JC: Jump if below (CF == 1). +// +// Forms: +// +// JC rel8 +// JC rel32 +// Construct and append a JC instruction to the active function. +func (c *Context) JC(r operand.Op) { + if inst, err := x86.JC(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JC: Jump if below (CF == 1). +// +// Forms: +// +// JC rel8 +// JC rel32 +// Construct and append a JC instruction to the active function. +// Operates on the global context. +func JC(r operand.Op) { ctx.JC(r) } + +// JCC: Jump if above or equal (CF == 0). +// +// Forms: +// +// JCC rel8 +// JCC rel32 +// Construct and append a JCC instruction to the active function. +func (c *Context) JCC(r operand.Op) { + if inst, err := x86.JCC(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JCC: Jump if above or equal (CF == 0). +// +// Forms: +// +// JCC rel8 +// JCC rel32 +// Construct and append a JCC instruction to the active function. +// Operates on the global context. +func JCC(r operand.Op) { ctx.JCC(r) } + +// JCS: Jump if below (CF == 1). +// +// Forms: +// +// JCS rel8 +// JCS rel32 +// Construct and append a JCS instruction to the active function. +func (c *Context) JCS(r operand.Op) { + if inst, err := x86.JCS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JCS: Jump if below (CF == 1). +// +// Forms: +// +// JCS rel8 +// JCS rel32 +// Construct and append a JCS instruction to the active function. +// Operates on the global context. +func JCS(r operand.Op) { ctx.JCS(r) } + +// JCXZL: Jump if ECX register is 0. +// +// Forms: +// +// JCXZL rel8 +// Construct and append a JCXZL instruction to the active function. +func (c *Context) JCXZL(r operand.Op) { + if inst, err := x86.JCXZL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JCXZL: Jump if ECX register is 0. +// +// Forms: +// +// JCXZL rel8 +// Construct and append a JCXZL instruction to the active function. +// Operates on the global context. +func JCXZL(r operand.Op) { ctx.JCXZL(r) } + +// JCXZQ: Jump if RCX register is 0. +// +// Forms: +// +// JCXZQ rel8 +// Construct and append a JCXZQ instruction to the active function. 
+func (c *Context) JCXZQ(r operand.Op) { + if inst, err := x86.JCXZQ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JCXZQ: Jump if RCX register is 0. +// +// Forms: +// +// JCXZQ rel8 +// Construct and append a JCXZQ instruction to the active function. +// Operates on the global context. +func JCXZQ(r operand.Op) { ctx.JCXZQ(r) } + +// JE: Jump if equal (ZF == 1). +// +// Forms: +// +// JE rel8 +// JE rel32 +// Construct and append a JE instruction to the active function. +func (c *Context) JE(r operand.Op) { + if inst, err := x86.JE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JE: Jump if equal (ZF == 1). +// +// Forms: +// +// JE rel8 +// JE rel32 +// Construct and append a JE instruction to the active function. +// Operates on the global context. +func JE(r operand.Op) { ctx.JE(r) } + +// JEQ: Jump if equal (ZF == 1). +// +// Forms: +// +// JEQ rel8 +// JEQ rel32 +// Construct and append a JEQ instruction to the active function. +func (c *Context) JEQ(r operand.Op) { + if inst, err := x86.JEQ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JEQ: Jump if equal (ZF == 1). +// +// Forms: +// +// JEQ rel8 +// JEQ rel32 +// Construct and append a JEQ instruction to the active function. +// Operates on the global context. +func JEQ(r operand.Op) { ctx.JEQ(r) } + +// JG: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JG rel8 +// JG rel32 +// Construct and append a JG instruction to the active function. +func (c *Context) JG(r operand.Op) { + if inst, err := x86.JG(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JG: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JG rel8 +// JG rel32 +// Construct and append a JG instruction to the active function. +// Operates on the global context. +func JG(r operand.Op) { ctx.JG(r) } + +// JGE: Jump if greater or equal (SF == OF). +// +// Forms: +// +// JGE rel8 +// JGE rel32 +// Construct and append a JGE instruction to the active function. +func (c *Context) JGE(r operand.Op) { + if inst, err := x86.JGE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JGE: Jump if greater or equal (SF == OF). +// +// Forms: +// +// JGE rel8 +// JGE rel32 +// Construct and append a JGE instruction to the active function. +// Operates on the global context. +func JGE(r operand.Op) { ctx.JGE(r) } + +// JGT: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JGT rel8 +// JGT rel32 +// Construct and append a JGT instruction to the active function. +func (c *Context) JGT(r operand.Op) { + if inst, err := x86.JGT(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JGT: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JGT rel8 +// JGT rel32 +// Construct and append a JGT instruction to the active function. +// Operates on the global context. +func JGT(r operand.Op) { ctx.JGT(r) } + +// JHI: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JHI rel8 +// JHI rel32 +// Construct and append a JHI instruction to the active function. +func (c *Context) JHI(r operand.Op) { + if inst, err := x86.JHI(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JHI: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JHI rel8 +// JHI rel32 +// Construct and append a JHI instruction to the active function. +// Operates on the global context. 
+func JHI(r operand.Op) { ctx.JHI(r) } + +// JHS: Jump if above or equal (CF == 0). +// +// Forms: +// +// JHS rel8 +// JHS rel32 +// Construct and append a JHS instruction to the active function. +func (c *Context) JHS(r operand.Op) { + if inst, err := x86.JHS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JHS: Jump if above or equal (CF == 0). +// +// Forms: +// +// JHS rel8 +// JHS rel32 +// Construct and append a JHS instruction to the active function. +// Operates on the global context. +func JHS(r operand.Op) { ctx.JHS(r) } + +// JL: Jump if less (SF != OF). +// +// Forms: +// +// JL rel8 +// JL rel32 +// Construct and append a JL instruction to the active function. +func (c *Context) JL(r operand.Op) { + if inst, err := x86.JL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JL: Jump if less (SF != OF). +// +// Forms: +// +// JL rel8 +// JL rel32 +// Construct and append a JL instruction to the active function. +// Operates on the global context. +func JL(r operand.Op) { ctx.JL(r) } + +// JLE: Jump if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// JLE rel8 +// JLE rel32 +// Construct and append a JLE instruction to the active function. +func (c *Context) JLE(r operand.Op) { + if inst, err := x86.JLE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JLE: Jump if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// JLE rel8 +// JLE rel32 +// Construct and append a JLE instruction to the active function. +// Operates on the global context. +func JLE(r operand.Op) { ctx.JLE(r) } + +// JLO: Jump if below (CF == 1). +// +// Forms: +// +// JLO rel8 +// JLO rel32 +// Construct and append a JLO instruction to the active function. +func (c *Context) JLO(r operand.Op) { + if inst, err := x86.JLO(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JLO: Jump if below (CF == 1). +// +// Forms: +// +// JLO rel8 +// JLO rel32 +// Construct and append a JLO instruction to the active function. +// Operates on the global context. +func JLO(r operand.Op) { ctx.JLO(r) } + +// JLS: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JLS rel8 +// JLS rel32 +// Construct and append a JLS instruction to the active function. +func (c *Context) JLS(r operand.Op) { + if inst, err := x86.JLS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JLS: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JLS rel8 +// JLS rel32 +// Construct and append a JLS instruction to the active function. +// Operates on the global context. +func JLS(r operand.Op) { ctx.JLS(r) } + +// JLT: Jump if less (SF != OF). +// +// Forms: +// +// JLT rel8 +// JLT rel32 +// Construct and append a JLT instruction to the active function. +func (c *Context) JLT(r operand.Op) { + if inst, err := x86.JLT(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JLT: Jump if less (SF != OF). +// +// Forms: +// +// JLT rel8 +// JLT rel32 +// Construct and append a JLT instruction to the active function. +// Operates on the global context. +func JLT(r operand.Op) { ctx.JLT(r) } + +// JMI: Jump if sign (SF == 1). +// +// Forms: +// +// JMI rel8 +// JMI rel32 +// Construct and append a JMI instruction to the active function. +func (c *Context) JMI(r operand.Op) { + if inst, err := x86.JMI(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JMI: Jump if sign (SF == 1). 
+// +// Forms: +// +// JMI rel8 +// JMI rel32 +// Construct and append a JMI instruction to the active function. +// Operates on the global context. +func JMI(r operand.Op) { ctx.JMI(r) } + +// JMP: Jump Unconditionally. +// +// Forms: +// +// JMP rel8 +// JMP rel32 +// JMP r64 +// JMP m64 +// Construct and append a JMP instruction to the active function. +func (c *Context) JMP(mr operand.Op) { + if inst, err := x86.JMP(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JMP: Jump Unconditionally. +// +// Forms: +// +// JMP rel8 +// JMP rel32 +// JMP r64 +// JMP m64 +// Construct and append a JMP instruction to the active function. +// Operates on the global context. +func JMP(mr operand.Op) { ctx.JMP(mr) } + +// JNA: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JNA rel8 +// JNA rel32 +// Construct and append a JNA instruction to the active function. +func (c *Context) JNA(r operand.Op) { + if inst, err := x86.JNA(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNA: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JNA rel8 +// JNA rel32 +// Construct and append a JNA instruction to the active function. +// Operates on the global context. +func JNA(r operand.Op) { ctx.JNA(r) } + +// JNAE: Jump if below (CF == 1). +// +// Forms: +// +// JNAE rel8 +// JNAE rel32 +// Construct and append a JNAE instruction to the active function. +func (c *Context) JNAE(r operand.Op) { + if inst, err := x86.JNAE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNAE: Jump if below (CF == 1). +// +// Forms: +// +// JNAE rel8 +// JNAE rel32 +// Construct and append a JNAE instruction to the active function. +// Operates on the global context. +func JNAE(r operand.Op) { ctx.JNAE(r) } + +// JNB: Jump if above or equal (CF == 0). +// +// Forms: +// +// JNB rel8 +// JNB rel32 +// Construct and append a JNB instruction to the active function. +func (c *Context) JNB(r operand.Op) { + if inst, err := x86.JNB(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNB: Jump if above or equal (CF == 0). +// +// Forms: +// +// JNB rel8 +// JNB rel32 +// Construct and append a JNB instruction to the active function. +// Operates on the global context. +func JNB(r operand.Op) { ctx.JNB(r) } + +// JNBE: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JNBE rel8 +// JNBE rel32 +// Construct and append a JNBE instruction to the active function. +func (c *Context) JNBE(r operand.Op) { + if inst, err := x86.JNBE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNBE: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JNBE rel8 +// JNBE rel32 +// Construct and append a JNBE instruction to the active function. +// Operates on the global context. +func JNBE(r operand.Op) { ctx.JNBE(r) } + +// JNC: Jump if above or equal (CF == 0). +// +// Forms: +// +// JNC rel8 +// JNC rel32 +// Construct and append a JNC instruction to the active function. +func (c *Context) JNC(r operand.Op) { + if inst, err := x86.JNC(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNC: Jump if above or equal (CF == 0). +// +// Forms: +// +// JNC rel8 +// JNC rel32 +// Construct and append a JNC instruction to the active function. +// Operates on the global context. +func JNC(r operand.Op) { ctx.JNC(r) } + +// JNE: Jump if not equal (ZF == 0). 
+// +// Forms: +// +// JNE rel8 +// JNE rel32 +// Construct and append a JNE instruction to the active function. +func (c *Context) JNE(r operand.Op) { + if inst, err := x86.JNE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNE: Jump if not equal (ZF == 0). +// +// Forms: +// +// JNE rel8 +// JNE rel32 +// Construct and append a JNE instruction to the active function. +// Operates on the global context. +func JNE(r operand.Op) { ctx.JNE(r) } + +// JNG: Jump if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// JNG rel8 +// JNG rel32 +// Construct and append a JNG instruction to the active function. +func (c *Context) JNG(r operand.Op) { + if inst, err := x86.JNG(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNG: Jump if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// JNG rel8 +// JNG rel32 +// Construct and append a JNG instruction to the active function. +// Operates on the global context. +func JNG(r operand.Op) { ctx.JNG(r) } + +// JNGE: Jump if less (SF != OF). +// +// Forms: +// +// JNGE rel8 +// JNGE rel32 +// Construct and append a JNGE instruction to the active function. +func (c *Context) JNGE(r operand.Op) { + if inst, err := x86.JNGE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNGE: Jump if less (SF != OF). +// +// Forms: +// +// JNGE rel8 +// JNGE rel32 +// Construct and append a JNGE instruction to the active function. +// Operates on the global context. +func JNGE(r operand.Op) { ctx.JNGE(r) } + +// JNL: Jump if greater or equal (SF == OF). +// +// Forms: +// +// JNL rel8 +// JNL rel32 +// Construct and append a JNL instruction to the active function. +func (c *Context) JNL(r operand.Op) { + if inst, err := x86.JNL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNL: Jump if greater or equal (SF == OF). +// +// Forms: +// +// JNL rel8 +// JNL rel32 +// Construct and append a JNL instruction to the active function. +// Operates on the global context. +func JNL(r operand.Op) { ctx.JNL(r) } + +// JNLE: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JNLE rel8 +// JNLE rel32 +// Construct and append a JNLE instruction to the active function. +func (c *Context) JNLE(r operand.Op) { + if inst, err := x86.JNLE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNLE: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JNLE rel8 +// JNLE rel32 +// Construct and append a JNLE instruction to the active function. +// Operates on the global context. +func JNLE(r operand.Op) { ctx.JNLE(r) } + +// JNO: Jump if not overflow (OF == 0). +// +// Forms: +// +// JNO rel8 +// JNO rel32 +// Construct and append a JNO instruction to the active function. +func (c *Context) JNO(r operand.Op) { + if inst, err := x86.JNO(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNO: Jump if not overflow (OF == 0). +// +// Forms: +// +// JNO rel8 +// JNO rel32 +// Construct and append a JNO instruction to the active function. +// Operates on the global context. +func JNO(r operand.Op) { ctx.JNO(r) } + +// JNP: Jump if not parity (PF == 0). +// +// Forms: +// +// JNP rel8 +// JNP rel32 +// Construct and append a JNP instruction to the active function. +func (c *Context) JNP(r operand.Op) { + if inst, err := x86.JNP(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNP: Jump if not parity (PF == 0). 
+// +// Forms: +// +// JNP rel8 +// JNP rel32 +// Construct and append a JNP instruction to the active function. +// Operates on the global context. +func JNP(r operand.Op) { ctx.JNP(r) } + +// JNS: Jump if not sign (SF == 0). +// +// Forms: +// +// JNS rel8 +// JNS rel32 +// Construct and append a JNS instruction to the active function. +func (c *Context) JNS(r operand.Op) { + if inst, err := x86.JNS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNS: Jump if not sign (SF == 0). +// +// Forms: +// +// JNS rel8 +// JNS rel32 +// Construct and append a JNS instruction to the active function. +// Operates on the global context. +func JNS(r operand.Op) { ctx.JNS(r) } + +// JNZ: Jump if not equal (ZF == 0). +// +// Forms: +// +// JNZ rel8 +// JNZ rel32 +// Construct and append a JNZ instruction to the active function. +func (c *Context) JNZ(r operand.Op) { + if inst, err := x86.JNZ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JNZ: Jump if not equal (ZF == 0). +// +// Forms: +// +// JNZ rel8 +// JNZ rel32 +// Construct and append a JNZ instruction to the active function. +// Operates on the global context. +func JNZ(r operand.Op) { ctx.JNZ(r) } + +// JO: Jump if overflow (OF == 1). +// +// Forms: +// +// JO rel8 +// JO rel32 +// Construct and append a JO instruction to the active function. +func (c *Context) JO(r operand.Op) { + if inst, err := x86.JO(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JO: Jump if overflow (OF == 1). +// +// Forms: +// +// JO rel8 +// JO rel32 +// Construct and append a JO instruction to the active function. +// Operates on the global context. +func JO(r operand.Op) { ctx.JO(r) } + +// JOC: Jump if not overflow (OF == 0). +// +// Forms: +// +// JOC rel8 +// JOC rel32 +// Construct and append a JOC instruction to the active function. +func (c *Context) JOC(r operand.Op) { + if inst, err := x86.JOC(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JOC: Jump if not overflow (OF == 0). +// +// Forms: +// +// JOC rel8 +// JOC rel32 +// Construct and append a JOC instruction to the active function. +// Operates on the global context. +func JOC(r operand.Op) { ctx.JOC(r) } + +// JOS: Jump if overflow (OF == 1). +// +// Forms: +// +// JOS rel8 +// JOS rel32 +// Construct and append a JOS instruction to the active function. +func (c *Context) JOS(r operand.Op) { + if inst, err := x86.JOS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JOS: Jump if overflow (OF == 1). +// +// Forms: +// +// JOS rel8 +// JOS rel32 +// Construct and append a JOS instruction to the active function. +// Operates on the global context. +func JOS(r operand.Op) { ctx.JOS(r) } + +// JP: Jump if parity (PF == 1). +// +// Forms: +// +// JP rel8 +// JP rel32 +// Construct and append a JP instruction to the active function. +func (c *Context) JP(r operand.Op) { + if inst, err := x86.JP(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JP: Jump if parity (PF == 1). +// +// Forms: +// +// JP rel8 +// JP rel32 +// Construct and append a JP instruction to the active function. +// Operates on the global context. +func JP(r operand.Op) { ctx.JP(r) } + +// JPC: Jump if not parity (PF == 0). +// +// Forms: +// +// JPC rel8 +// JPC rel32 +// Construct and append a JPC instruction to the active function. 
+func (c *Context) JPC(r operand.Op) { + if inst, err := x86.JPC(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JPC: Jump if not parity (PF == 0). +// +// Forms: +// +// JPC rel8 +// JPC rel32 +// Construct and append a JPC instruction to the active function. +// Operates on the global context. +func JPC(r operand.Op) { ctx.JPC(r) } + +// JPE: Jump if parity (PF == 1). +// +// Forms: +// +// JPE rel8 +// JPE rel32 +// Construct and append a JPE instruction to the active function. +func (c *Context) JPE(r operand.Op) { + if inst, err := x86.JPE(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JPE: Jump if parity (PF == 1). +// +// Forms: +// +// JPE rel8 +// JPE rel32 +// Construct and append a JPE instruction to the active function. +// Operates on the global context. +func JPE(r operand.Op) { ctx.JPE(r) } + +// JPL: Jump if not sign (SF == 0). +// +// Forms: +// +// JPL rel8 +// JPL rel32 +// Construct and append a JPL instruction to the active function. +func (c *Context) JPL(r operand.Op) { + if inst, err := x86.JPL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JPL: Jump if not sign (SF == 0). +// +// Forms: +// +// JPL rel8 +// JPL rel32 +// Construct and append a JPL instruction to the active function. +// Operates on the global context. +func JPL(r operand.Op) { ctx.JPL(r) } + +// JPO: Jump if not parity (PF == 0). +// +// Forms: +// +// JPO rel8 +// JPO rel32 +// Construct and append a JPO instruction to the active function. +func (c *Context) JPO(r operand.Op) { + if inst, err := x86.JPO(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JPO: Jump if not parity (PF == 0). +// +// Forms: +// +// JPO rel8 +// JPO rel32 +// Construct and append a JPO instruction to the active function. +// Operates on the global context. +func JPO(r operand.Op) { ctx.JPO(r) } + +// JPS: Jump if parity (PF == 1). +// +// Forms: +// +// JPS rel8 +// JPS rel32 +// Construct and append a JPS instruction to the active function. +func (c *Context) JPS(r operand.Op) { + if inst, err := x86.JPS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JPS: Jump if parity (PF == 1). +// +// Forms: +// +// JPS rel8 +// JPS rel32 +// Construct and append a JPS instruction to the active function. +// Operates on the global context. +func JPS(r operand.Op) { ctx.JPS(r) } + +// JS: Jump if sign (SF == 1). +// +// Forms: +// +// JS rel8 +// JS rel32 +// Construct and append a JS instruction to the active function. +func (c *Context) JS(r operand.Op) { + if inst, err := x86.JS(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JS: Jump if sign (SF == 1). +// +// Forms: +// +// JS rel8 +// JS rel32 +// Construct and append a JS instruction to the active function. +// Operates on the global context. +func JS(r operand.Op) { ctx.JS(r) } + +// JZ: Jump if equal (ZF == 1). +// +// Forms: +// +// JZ rel8 +// JZ rel32 +// Construct and append a JZ instruction to the active function. +func (c *Context) JZ(r operand.Op) { + if inst, err := x86.JZ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// JZ: Jump if equal (ZF == 1). +// +// Forms: +// +// JZ rel8 +// JZ rel32 +// Construct and append a JZ instruction to the active function. +// Operates on the global context. +func JZ(r operand.Op) { ctx.JZ(r) } + +// LDDQU: Load Unaligned Integer 128 Bits. 
+// +// Forms: +// +// LDDQU m128 xmm +// Construct and append a LDDQU instruction to the active function. +func (c *Context) LDDQU(m, x operand.Op) { + if inst, err := x86.LDDQU(m, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LDDQU: Load Unaligned Integer 128 Bits. +// +// Forms: +// +// LDDQU m128 xmm +// Construct and append a LDDQU instruction to the active function. +// Operates on the global context. +func LDDQU(m, x operand.Op) { ctx.LDDQU(m, x) } + +// LDMXCSR: Load MXCSR Register. +// +// Forms: +// +// LDMXCSR m32 +// Construct and append a LDMXCSR instruction to the active function. +func (c *Context) LDMXCSR(m operand.Op) { + if inst, err := x86.LDMXCSR(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LDMXCSR: Load MXCSR Register. +// +// Forms: +// +// LDMXCSR m32 +// Construct and append a LDMXCSR instruction to the active function. +// Operates on the global context. +func LDMXCSR(m operand.Op) { ctx.LDMXCSR(m) } + +// LEAL: Load Effective Address. +// +// Forms: +// +// LEAL m r32 +// Construct and append a LEAL instruction to the active function. +func (c *Context) LEAL(m, r operand.Op) { + if inst, err := x86.LEAL(m, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LEAL: Load Effective Address. +// +// Forms: +// +// LEAL m r32 +// Construct and append a LEAL instruction to the active function. +// Operates on the global context. +func LEAL(m, r operand.Op) { ctx.LEAL(m, r) } + +// LEAQ: Load Effective Address. +// +// Forms: +// +// LEAQ m r64 +// Construct and append a LEAQ instruction to the active function. +func (c *Context) LEAQ(m, r operand.Op) { + if inst, err := x86.LEAQ(m, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LEAQ: Load Effective Address. +// +// Forms: +// +// LEAQ m r64 +// Construct and append a LEAQ instruction to the active function. +// Operates on the global context. +func LEAQ(m, r operand.Op) { ctx.LEAQ(m, r) } + +// LEAW: Load Effective Address. +// +// Forms: +// +// LEAW m r16 +// Construct and append a LEAW instruction to the active function. +func (c *Context) LEAW(m, r operand.Op) { + if inst, err := x86.LEAW(m, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LEAW: Load Effective Address. +// +// Forms: +// +// LEAW m r16 +// Construct and append a LEAW instruction to the active function. +// Operates on the global context. +func LEAW(m, r operand.Op) { ctx.LEAW(m, r) } + +// LFENCE: Load Fence. +// +// Forms: +// +// LFENCE +// Construct and append a LFENCE instruction to the active function. +func (c *Context) LFENCE() { + if inst, err := x86.LFENCE(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LFENCE: Load Fence. +// +// Forms: +// +// LFENCE +// Construct and append a LFENCE instruction to the active function. +// Operates on the global context. +func LFENCE() { ctx.LFENCE() } + +// LZCNTL: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTL r32 r32 +// LZCNTL m32 r32 +// Construct and append a LZCNTL instruction to the active function. +func (c *Context) LZCNTL(mr, r operand.Op) { + if inst, err := x86.LZCNTL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LZCNTL: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTL r32 r32 +// LZCNTL m32 r32 +// Construct and append a LZCNTL instruction to the active function. +// Operates on the global context. 
+func LZCNTL(mr, r operand.Op) { ctx.LZCNTL(mr, r) } + +// LZCNTQ: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTQ r64 r64 +// LZCNTQ m64 r64 +// Construct and append a LZCNTQ instruction to the active function. +func (c *Context) LZCNTQ(mr, r operand.Op) { + if inst, err := x86.LZCNTQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LZCNTQ: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTQ r64 r64 +// LZCNTQ m64 r64 +// Construct and append a LZCNTQ instruction to the active function. +// Operates on the global context. +func LZCNTQ(mr, r operand.Op) { ctx.LZCNTQ(mr, r) } + +// LZCNTW: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTW r16 r16 +// LZCNTW m16 r16 +// Construct and append a LZCNTW instruction to the active function. +func (c *Context) LZCNTW(mr, r operand.Op) { + if inst, err := x86.LZCNTW(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// LZCNTW: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTW r16 r16 +// LZCNTW m16 r16 +// Construct and append a LZCNTW instruction to the active function. +// Operates on the global context. +func LZCNTW(mr, r operand.Op) { ctx.LZCNTW(mr, r) } + +// MASKMOVDQU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// MASKMOVDQU xmm xmm +// Construct and append a MASKMOVDQU instruction to the active function. +func (c *Context) MASKMOVDQU(x, x1 operand.Op) { + if inst, err := x86.MASKMOVDQU(x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MASKMOVDQU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// MASKMOVDQU xmm xmm +// Construct and append a MASKMOVDQU instruction to the active function. +// Operates on the global context. +func MASKMOVDQU(x, x1 operand.Op) { ctx.MASKMOVDQU(x, x1) } + +// MASKMOVOU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// MASKMOVOU xmm xmm +// Construct and append a MASKMOVOU instruction to the active function. +func (c *Context) MASKMOVOU(x, x1 operand.Op) { + if inst, err := x86.MASKMOVOU(x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MASKMOVOU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// MASKMOVOU xmm xmm +// Construct and append a MASKMOVOU instruction to the active function. +// Operates on the global context. +func MASKMOVOU(x, x1 operand.Op) { ctx.MASKMOVOU(x, x1) } + +// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MAXPD xmm xmm +// MAXPD m128 xmm +// Construct and append a MAXPD instruction to the active function. +func (c *Context) MAXPD(mx, x operand.Op) { + if inst, err := x86.MAXPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MAXPD xmm xmm +// MAXPD m128 xmm +// Construct and append a MAXPD instruction to the active function. +// Operates on the global context. +func MAXPD(mx, x operand.Op) { ctx.MAXPD(mx, x) } + +// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MAXPS xmm xmm +// MAXPS m128 xmm +// Construct and append a MAXPS instruction to the active function. +func (c *Context) MAXPS(mx, x operand.Op) { + if inst, err := x86.MAXPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// MAXPS xmm xmm +// MAXPS m128 xmm +// Construct and append a MAXPS instruction to the active function. +// Operates on the global context. +func MAXPS(mx, x operand.Op) { ctx.MAXPS(mx, x) } + +// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MAXSD xmm xmm +// MAXSD m64 xmm +// Construct and append a MAXSD instruction to the active function. +func (c *Context) MAXSD(mx, x operand.Op) { + if inst, err := x86.MAXSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MAXSD xmm xmm +// MAXSD m64 xmm +// Construct and append a MAXSD instruction to the active function. +// Operates on the global context. +func MAXSD(mx, x operand.Op) { ctx.MAXSD(mx, x) } + +// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// MAXSS xmm xmm +// MAXSS m32 xmm +// Construct and append a MAXSS instruction to the active function. +func (c *Context) MAXSS(mx, x operand.Op) { + if inst, err := x86.MAXSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// MAXSS xmm xmm +// MAXSS m32 xmm +// Construct and append a MAXSS instruction to the active function. +// Operates on the global context. +func MAXSS(mx, x operand.Op) { ctx.MAXSS(mx, x) } + +// MFENCE: Memory Fence. +// +// Forms: +// +// MFENCE +// Construct and append a MFENCE instruction to the active function. +func (c *Context) MFENCE() { + if inst, err := x86.MFENCE(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MFENCE: Memory Fence. +// +// Forms: +// +// MFENCE +// Construct and append a MFENCE instruction to the active function. +// Operates on the global context. +func MFENCE() { ctx.MFENCE() } + +// MINPD: Return Minimum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MINPD xmm xmm +// MINPD m128 xmm +// Construct and append a MINPD instruction to the active function. +func (c *Context) MINPD(mx, x operand.Op) { + if inst, err := x86.MINPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MINPD: Return Minimum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MINPD xmm xmm +// MINPD m128 xmm +// Construct and append a MINPD instruction to the active function. +// Operates on the global context. +func MINPD(mx, x operand.Op) { ctx.MINPD(mx, x) } + +// MINPS: Return Minimum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MINPS xmm xmm +// MINPS m128 xmm +// Construct and append a MINPS instruction to the active function. +func (c *Context) MINPS(mx, x operand.Op) { + if inst, err := x86.MINPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MINPS: Return Minimum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MINPS xmm xmm +// MINPS m128 xmm +// Construct and append a MINPS instruction to the active function. +// Operates on the global context. +func MINPS(mx, x operand.Op) { ctx.MINPS(mx, x) } + +// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MINSD xmm xmm +// MINSD m64 xmm +// Construct and append a MINSD instruction to the active function. 
+func (c *Context) MINSD(mx, x operand.Op) { + if inst, err := x86.MINSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MINSD xmm xmm +// MINSD m64 xmm +// Construct and append a MINSD instruction to the active function. +// Operates on the global context. +func MINSD(mx, x operand.Op) { ctx.MINSD(mx, x) } + +// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// MINSS xmm xmm +// MINSS m32 xmm +// Construct and append a MINSS instruction to the active function. +func (c *Context) MINSS(mx, x operand.Op) { + if inst, err := x86.MINSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// MINSS xmm xmm +// MINSS m32 xmm +// Construct and append a MINSS instruction to the active function. +// Operates on the global context. +func MINSS(mx, x operand.Op) { ctx.MINSS(mx, x) } + +// MONITOR: Monitor a Linear Address Range. +// +// Forms: +// +// MONITOR +// Construct and append a MONITOR instruction to the active function. +func (c *Context) MONITOR() { + if inst, err := x86.MONITOR(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MONITOR: Monitor a Linear Address Range. +// +// Forms: +// +// MONITOR +// Construct and append a MONITOR instruction to the active function. +// Operates on the global context. +func MONITOR() { ctx.MONITOR() } + +// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MOVAPD xmm xmm +// MOVAPD m128 xmm +// MOVAPD xmm m128 +// Construct and append a MOVAPD instruction to the active function. +func (c *Context) MOVAPD(mx, mx1 operand.Op) { + if inst, err := x86.MOVAPD(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MOVAPD xmm xmm +// MOVAPD m128 xmm +// MOVAPD xmm m128 +// Construct and append a MOVAPD instruction to the active function. +// Operates on the global context. +func MOVAPD(mx, mx1 operand.Op) { ctx.MOVAPD(mx, mx1) } + +// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVAPS xmm xmm +// MOVAPS m128 xmm +// MOVAPS xmm m128 +// Construct and append a MOVAPS instruction to the active function. +func (c *Context) MOVAPS(mx, mx1 operand.Op) { + if inst, err := x86.MOVAPS(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVAPS xmm xmm +// MOVAPS m128 xmm +// MOVAPS xmm m128 +// Construct and append a MOVAPS instruction to the active function. +// Operates on the global context. +func MOVAPS(mx, mx1 operand.Op) { ctx.MOVAPS(mx, mx1) } + +// MOVB: Move. +// +// Forms: +// +// MOVB imm8 r8 +// MOVB r8 r8 +// MOVB m8 r8 +// MOVB imm8 m8 +// MOVB r8 m8 +// Construct and append a MOVB instruction to the active function. +func (c *Context) MOVB(imr, mr operand.Op) { + if inst, err := x86.MOVB(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVB: Move. +// +// Forms: +// +// MOVB imm8 r8 +// MOVB r8 r8 +// MOVB m8 r8 +// MOVB imm8 m8 +// MOVB r8 m8 +// Construct and append a MOVB instruction to the active function. +// Operates on the global context. 
+func MOVB(imr, mr operand.Op) { ctx.MOVB(imr, mr) } + +// MOVBELL: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBELL m32 r32 +// MOVBELL r32 m32 +// Construct and append a MOVBELL instruction to the active function. +func (c *Context) MOVBELL(mr, mr1 operand.Op) { + if inst, err := x86.MOVBELL(mr, mr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBELL: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBELL m32 r32 +// MOVBELL r32 m32 +// Construct and append a MOVBELL instruction to the active function. +// Operates on the global context. +func MOVBELL(mr, mr1 operand.Op) { ctx.MOVBELL(mr, mr1) } + +// MOVBEQQ: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBEQQ m64 r64 +// MOVBEQQ r64 m64 +// Construct and append a MOVBEQQ instruction to the active function. +func (c *Context) MOVBEQQ(mr, mr1 operand.Op) { + if inst, err := x86.MOVBEQQ(mr, mr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBEQQ: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBEQQ m64 r64 +// MOVBEQQ r64 m64 +// Construct and append a MOVBEQQ instruction to the active function. +// Operates on the global context. +func MOVBEQQ(mr, mr1 operand.Op) { ctx.MOVBEQQ(mr, mr1) } + +// MOVBEWW: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBEWW m16 r16 +// MOVBEWW r16 m16 +// Construct and append a MOVBEWW instruction to the active function. +func (c *Context) MOVBEWW(mr, mr1 operand.Op) { + if inst, err := x86.MOVBEWW(mr, mr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBEWW: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBEWW m16 r16 +// MOVBEWW r16 m16 +// Construct and append a MOVBEWW instruction to the active function. +// Operates on the global context. +func MOVBEWW(mr, mr1 operand.Op) { ctx.MOVBEWW(mr, mr1) } + +// MOVBLSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBLSX r8 r32 +// MOVBLSX m8 r32 +// Construct and append a MOVBLSX instruction to the active function. +func (c *Context) MOVBLSX(mr, r operand.Op) { + if inst, err := x86.MOVBLSX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBLSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBLSX r8 r32 +// MOVBLSX m8 r32 +// Construct and append a MOVBLSX instruction to the active function. +// Operates on the global context. +func MOVBLSX(mr, r operand.Op) { ctx.MOVBLSX(mr, r) } + +// MOVBLZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBLZX r8 r32 +// MOVBLZX m8 r32 +// Construct and append a MOVBLZX instruction to the active function. +func (c *Context) MOVBLZX(mr, r operand.Op) { + if inst, err := x86.MOVBLZX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBLZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBLZX r8 r32 +// MOVBLZX m8 r32 +// Construct and append a MOVBLZX instruction to the active function. +// Operates on the global context. +func MOVBLZX(mr, r operand.Op) { ctx.MOVBLZX(mr, r) } + +// MOVBQSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBQSX r8 r64 +// MOVBQSX m8 r64 +// Construct and append a MOVBQSX instruction to the active function. +func (c *Context) MOVBQSX(mr, r operand.Op) { + if inst, err := x86.MOVBQSX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBQSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBQSX r8 r64 +// MOVBQSX m8 r64 +// Construct and append a MOVBQSX instruction to the active function. 
+// Operates on the global context. +func MOVBQSX(mr, r operand.Op) { ctx.MOVBQSX(mr, r) } + +// MOVBQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBQZX r8 r64 +// MOVBQZX m8 r64 +// Construct and append a MOVBQZX instruction to the active function. +func (c *Context) MOVBQZX(mr, r operand.Op) { + if inst, err := x86.MOVBQZX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBQZX r8 r64 +// MOVBQZX m8 r64 +// Construct and append a MOVBQZX instruction to the active function. +// Operates on the global context. +func MOVBQZX(mr, r operand.Op) { ctx.MOVBQZX(mr, r) } + +// MOVBWSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBWSX r8 r16 +// MOVBWSX m8 r16 +// Construct and append a MOVBWSX instruction to the active function. +func (c *Context) MOVBWSX(mr, r operand.Op) { + if inst, err := x86.MOVBWSX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBWSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBWSX r8 r16 +// MOVBWSX m8 r16 +// Construct and append a MOVBWSX instruction to the active function. +// Operates on the global context. +func MOVBWSX(mr, r operand.Op) { ctx.MOVBWSX(mr, r) } + +// MOVBWZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBWZX r8 r16 +// MOVBWZX m8 r16 +// Construct and append a MOVBWZX instruction to the active function. +func (c *Context) MOVBWZX(mr, r operand.Op) { + if inst, err := x86.MOVBWZX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVBWZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBWZX r8 r16 +// MOVBWZX m8 r16 +// Construct and append a MOVBWZX instruction to the active function. +// Operates on the global context. +func MOVBWZX(mr, r operand.Op) { ctx.MOVBWZX(mr, r) } + +// MOVD: Move. +// +// Forms: +// +// MOVD imm32 r64 +// MOVD imm64 r64 +// MOVD r64 r64 +// MOVD m64 r64 +// MOVD imm32 m64 +// MOVD r64 m64 +// MOVD xmm r64 +// MOVD r64 xmm +// MOVD xmm xmm +// MOVD m64 xmm +// MOVD xmm m64 +// MOVD xmm r32 +// MOVD r32 xmm +// MOVD m32 xmm +// MOVD xmm m32 +// Construct and append a MOVD instruction to the active function. +func (c *Context) MOVD(imrx, mrx operand.Op) { + if inst, err := x86.MOVD(imrx, mrx); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVD: Move. +// +// Forms: +// +// MOVD imm32 r64 +// MOVD imm64 r64 +// MOVD r64 r64 +// MOVD m64 r64 +// MOVD imm32 m64 +// MOVD r64 m64 +// MOVD xmm r64 +// MOVD r64 xmm +// MOVD xmm xmm +// MOVD m64 xmm +// MOVD xmm m64 +// MOVD xmm r32 +// MOVD r32 xmm +// MOVD m32 xmm +// MOVD xmm m32 +// Construct and append a MOVD instruction to the active function. +// Operates on the global context. +func MOVD(imrx, mrx operand.Op) { ctx.MOVD(imrx, mrx) } + +// MOVDDUP: Move One Double-FP and Duplicate. +// +// Forms: +// +// MOVDDUP xmm xmm +// MOVDDUP m64 xmm +// Construct and append a MOVDDUP instruction to the active function. +func (c *Context) MOVDDUP(mx, x operand.Op) { + if inst, err := x86.MOVDDUP(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVDDUP: Move One Double-FP and Duplicate. +// +// Forms: +// +// MOVDDUP xmm xmm +// MOVDDUP m64 xmm +// Construct and append a MOVDDUP instruction to the active function. +// Operates on the global context. +func MOVDDUP(mx, x operand.Op) { ctx.MOVDDUP(mx, x) } + +// MOVDQ2Q: Move. 
+// +// Forms: +// +// MOVDQ2Q imm32 r64 +// MOVDQ2Q imm64 r64 +// MOVDQ2Q r64 r64 +// MOVDQ2Q m64 r64 +// MOVDQ2Q imm32 m64 +// MOVDQ2Q r64 m64 +// MOVDQ2Q xmm r64 +// MOVDQ2Q r64 xmm +// MOVDQ2Q xmm xmm +// MOVDQ2Q m64 xmm +// MOVDQ2Q xmm m64 +// MOVDQ2Q xmm r32 +// MOVDQ2Q r32 xmm +// MOVDQ2Q m32 xmm +// MOVDQ2Q xmm m32 +// Construct and append a MOVDQ2Q instruction to the active function. +func (c *Context) MOVDQ2Q(imrx, mrx operand.Op) { + if inst, err := x86.MOVDQ2Q(imrx, mrx); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVDQ2Q: Move. +// +// Forms: +// +// MOVDQ2Q imm32 r64 +// MOVDQ2Q imm64 r64 +// MOVDQ2Q r64 r64 +// MOVDQ2Q m64 r64 +// MOVDQ2Q imm32 m64 +// MOVDQ2Q r64 m64 +// MOVDQ2Q xmm r64 +// MOVDQ2Q r64 xmm +// MOVDQ2Q xmm xmm +// MOVDQ2Q m64 xmm +// MOVDQ2Q xmm m64 +// MOVDQ2Q xmm r32 +// MOVDQ2Q r32 xmm +// MOVDQ2Q m32 xmm +// MOVDQ2Q xmm m32 +// Construct and append a MOVDQ2Q instruction to the active function. +// Operates on the global context. +func MOVDQ2Q(imrx, mrx operand.Op) { ctx.MOVDQ2Q(imrx, mrx) } + +// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low. +// +// Forms: +// +// MOVHLPS xmm xmm +// Construct and append a MOVHLPS instruction to the active function. +func (c *Context) MOVHLPS(x, x1 operand.Op) { + if inst, err := x86.MOVHLPS(x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low. +// +// Forms: +// +// MOVHLPS xmm xmm +// Construct and append a MOVHLPS instruction to the active function. +// Operates on the global context. +func MOVHLPS(x, x1 operand.Op) { ctx.MOVHLPS(x, x1) } + +// MOVHPD: Move High Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVHPD m64 xmm +// MOVHPD xmm m64 +// Construct and append a MOVHPD instruction to the active function. +func (c *Context) MOVHPD(mx, mx1 operand.Op) { + if inst, err := x86.MOVHPD(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVHPD: Move High Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVHPD m64 xmm +// MOVHPD xmm m64 +// Construct and append a MOVHPD instruction to the active function. +// Operates on the global context. +func MOVHPD(mx, mx1 operand.Op) { ctx.MOVHPD(mx, mx1) } + +// MOVHPS: Move High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVHPS m64 xmm +// MOVHPS xmm m64 +// Construct and append a MOVHPS instruction to the active function. +func (c *Context) MOVHPS(mx, mx1 operand.Op) { + if inst, err := x86.MOVHPS(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVHPS: Move High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVHPS m64 xmm +// MOVHPS xmm m64 +// Construct and append a MOVHPS instruction to the active function. +// Operates on the global context. +func MOVHPS(mx, mx1 operand.Op) { ctx.MOVHPS(mx, mx1) } + +// MOVL: Move. +// +// Forms: +// +// MOVL imm32 r32 +// MOVL r32 r32 +// MOVL m32 r32 +// MOVL imm32 m32 +// MOVL r32 m32 +// Construct and append a MOVL instruction to the active function. +func (c *Context) MOVL(imr, mr operand.Op) { + if inst, err := x86.MOVL(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVL: Move. +// +// Forms: +// +// MOVL imm32 r32 +// MOVL r32 r32 +// MOVL m32 r32 +// MOVL imm32 m32 +// MOVL r32 m32 +// Construct and append a MOVL instruction to the active function. 
+// Operates on the global context. +func MOVL(imr, mr operand.Op) { ctx.MOVL(imr, mr) } + +// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High. +// +// Forms: +// +// MOVLHPS xmm xmm +// Construct and append a MOVLHPS instruction to the active function. +func (c *Context) MOVLHPS(x, x1 operand.Op) { + if inst, err := x86.MOVLHPS(x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High. +// +// Forms: +// +// MOVLHPS xmm xmm +// Construct and append a MOVLHPS instruction to the active function. +// Operates on the global context. +func MOVLHPS(x, x1 operand.Op) { ctx.MOVLHPS(x, x1) } + +// MOVLPD: Move Low Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVLPD m64 xmm +// MOVLPD xmm m64 +// Construct and append a MOVLPD instruction to the active function. +func (c *Context) MOVLPD(mx, mx1 operand.Op) { + if inst, err := x86.MOVLPD(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVLPD: Move Low Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVLPD m64 xmm +// MOVLPD xmm m64 +// Construct and append a MOVLPD instruction to the active function. +// Operates on the global context. +func MOVLPD(mx, mx1 operand.Op) { ctx.MOVLPD(mx, mx1) } + +// MOVLPS: Move Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVLPS m64 xmm +// MOVLPS xmm m64 +// Construct and append a MOVLPS instruction to the active function. +func (c *Context) MOVLPS(mx, mx1 operand.Op) { + if inst, err := x86.MOVLPS(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVLPS: Move Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVLPS m64 xmm +// MOVLPS xmm m64 +// Construct and append a MOVLPS instruction to the active function. +// Operates on the global context. +func MOVLPS(mx, mx1 operand.Op) { ctx.MOVLPS(mx, mx1) } + +// MOVLQSX: Move Doubleword to Quadword with Sign-Extension. +// +// Forms: +// +// MOVLQSX r32 r64 +// MOVLQSX m32 r64 +// Construct and append a MOVLQSX instruction to the active function. +func (c *Context) MOVLQSX(mr, r operand.Op) { + if inst, err := x86.MOVLQSX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVLQSX: Move Doubleword to Quadword with Sign-Extension. +// +// Forms: +// +// MOVLQSX r32 r64 +// MOVLQSX m32 r64 +// Construct and append a MOVLQSX instruction to the active function. +// Operates on the global context. +func MOVLQSX(mr, r operand.Op) { ctx.MOVLQSX(mr, r) } + +// MOVLQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVLQZX m32 r64 +// Construct and append a MOVLQZX instruction to the active function. +func (c *Context) MOVLQZX(m, r operand.Op) { + if inst, err := x86.MOVLQZX(m, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVLQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVLQZX m32 r64 +// Construct and append a MOVLQZX instruction to the active function. +// Operates on the global context. +func MOVLQZX(m, r operand.Op) { ctx.MOVLQZX(m, r) } + +// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask. +// +// Forms: +// +// MOVMSKPD xmm r32 +// Construct and append a MOVMSKPD instruction to the active function. 
+func (c *Context) MOVMSKPD(x, r operand.Op) { + if inst, err := x86.MOVMSKPD(x, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask. +// +// Forms: +// +// MOVMSKPD xmm r32 +// Construct and append a MOVMSKPD instruction to the active function. +// Operates on the global context. +func MOVMSKPD(x, r operand.Op) { ctx.MOVMSKPD(x, r) } + +// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask. +// +// Forms: +// +// MOVMSKPS xmm r32 +// Construct and append a MOVMSKPS instruction to the active function. +func (c *Context) MOVMSKPS(x, r operand.Op) { + if inst, err := x86.MOVMSKPS(x, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask. +// +// Forms: +// +// MOVMSKPS xmm r32 +// Construct and append a MOVMSKPS instruction to the active function. +// Operates on the global context. +func MOVMSKPS(x, r operand.Op) { ctx.MOVMSKPS(x, r) } + +// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTDQ xmm m128 +// Construct and append a MOVNTDQ instruction to the active function. +func (c *Context) MOVNTDQ(x, m operand.Op) { + if inst, err := x86.MOVNTDQ(x, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTDQ xmm m128 +// Construct and append a MOVNTDQ instruction to the active function. +// Operates on the global context. +func MOVNTDQ(x, m operand.Op) { ctx.MOVNTDQ(x, m) } + +// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint. +// +// Forms: +// +// MOVNTDQA m128 xmm +// Construct and append a MOVNTDQA instruction to the active function. +func (c *Context) MOVNTDQA(m, x operand.Op) { + if inst, err := x86.MOVNTDQA(m, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint. +// +// Forms: +// +// MOVNTDQA m128 xmm +// Construct and append a MOVNTDQA instruction to the active function. +// Operates on the global context. +func MOVNTDQA(m, x operand.Op) { ctx.MOVNTDQA(m, x) } + +// MOVNTIL: Store Doubleword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTIL r32 m32 +// Construct and append a MOVNTIL instruction to the active function. +func (c *Context) MOVNTIL(r, m operand.Op) { + if inst, err := x86.MOVNTIL(r, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTIL: Store Doubleword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTIL r32 m32 +// Construct and append a MOVNTIL instruction to the active function. +// Operates on the global context. +func MOVNTIL(r, m operand.Op) { ctx.MOVNTIL(r, m) } + +// MOVNTIQ: Store Doubleword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTIQ r64 m64 +// Construct and append a MOVNTIQ instruction to the active function. +func (c *Context) MOVNTIQ(r, m operand.Op) { + if inst, err := x86.MOVNTIQ(r, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTIQ: Store Doubleword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTIQ r64 m64 +// Construct and append a MOVNTIQ instruction to the active function. +// Operates on the global context. +func MOVNTIQ(r, m operand.Op) { ctx.MOVNTIQ(r, m) } + +// MOVNTO: Store Double Quadword Using Non-Temporal Hint. 
+// +// Forms: +// +// MOVNTO xmm m128 +// Construct and append a MOVNTO instruction to the active function. +func (c *Context) MOVNTO(x, m operand.Op) { + if inst, err := x86.MOVNTO(x, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTO: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTO xmm m128 +// Construct and append a MOVNTO instruction to the active function. +// Operates on the global context. +func MOVNTO(x, m operand.Op) { ctx.MOVNTO(x, m) } + +// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTPD xmm m128 +// Construct and append a MOVNTPD instruction to the active function. +func (c *Context) MOVNTPD(x, m operand.Op) { + if inst, err := x86.MOVNTPD(x, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTPD xmm m128 +// Construct and append a MOVNTPD instruction to the active function. +// Operates on the global context. +func MOVNTPD(x, m operand.Op) { ctx.MOVNTPD(x, m) } + +// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTPS xmm m128 +// Construct and append a MOVNTPS instruction to the active function. +func (c *Context) MOVNTPS(x, m operand.Op) { + if inst, err := x86.MOVNTPS(x, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTPS xmm m128 +// Construct and append a MOVNTPS instruction to the active function. +// Operates on the global context. +func MOVNTPS(x, m operand.Op) { ctx.MOVNTPS(x, m) } + +// MOVO: Move Aligned Double Quadword. +// +// Forms: +// +// MOVO xmm xmm +// MOVO m128 xmm +// MOVO xmm m128 +// Construct and append a MOVO instruction to the active function. +func (c *Context) MOVO(mx, mx1 operand.Op) { + if inst, err := x86.MOVO(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVO: Move Aligned Double Quadword. +// +// Forms: +// +// MOVO xmm xmm +// MOVO m128 xmm +// MOVO xmm m128 +// Construct and append a MOVO instruction to the active function. +// Operates on the global context. +func MOVO(mx, mx1 operand.Op) { ctx.MOVO(mx, mx1) } + +// MOVOA: Move Aligned Double Quadword. +// +// Forms: +// +// MOVOA xmm xmm +// MOVOA m128 xmm +// MOVOA xmm m128 +// Construct and append a MOVOA instruction to the active function. +func (c *Context) MOVOA(mx, mx1 operand.Op) { + if inst, err := x86.MOVOA(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVOA: Move Aligned Double Quadword. +// +// Forms: +// +// MOVOA xmm xmm +// MOVOA m128 xmm +// MOVOA xmm m128 +// Construct and append a MOVOA instruction to the active function. +// Operates on the global context. +func MOVOA(mx, mx1 operand.Op) { ctx.MOVOA(mx, mx1) } + +// MOVOU: Move Unaligned Double Quadword. +// +// Forms: +// +// MOVOU xmm xmm +// MOVOU m128 xmm +// MOVOU xmm m128 +// Construct and append a MOVOU instruction to the active function. +func (c *Context) MOVOU(mx, mx1 operand.Op) { + if inst, err := x86.MOVOU(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVOU: Move Unaligned Double Quadword. 
+// +// Forms: +// +// MOVOU xmm xmm +// MOVOU m128 xmm +// MOVOU xmm m128 +// Construct and append a MOVOU instruction to the active function. +// Operates on the global context. +func MOVOU(mx, mx1 operand.Op) { ctx.MOVOU(mx, mx1) } + +// MOVQ: Move. +// +// Forms: +// +// MOVQ imm32 r64 +// MOVQ imm64 r64 +// MOVQ r64 r64 +// MOVQ m64 r64 +// MOVQ imm32 m64 +// MOVQ r64 m64 +// MOVQ xmm r64 +// MOVQ r64 xmm +// MOVQ xmm xmm +// MOVQ m64 xmm +// MOVQ xmm m64 +// MOVQ xmm r32 +// MOVQ r32 xmm +// MOVQ m32 xmm +// MOVQ xmm m32 +// Construct and append a MOVQ instruction to the active function. +func (c *Context) MOVQ(imrx, mrx operand.Op) { + if inst, err := x86.MOVQ(imrx, mrx); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVQ: Move. +// +// Forms: +// +// MOVQ imm32 r64 +// MOVQ imm64 r64 +// MOVQ r64 r64 +// MOVQ m64 r64 +// MOVQ imm32 m64 +// MOVQ r64 m64 +// MOVQ xmm r64 +// MOVQ r64 xmm +// MOVQ xmm xmm +// MOVQ m64 xmm +// MOVQ xmm m64 +// MOVQ xmm r32 +// MOVQ r32 xmm +// MOVQ m32 xmm +// MOVQ xmm m32 +// Construct and append a MOVQ instruction to the active function. +// Operates on the global context. +func MOVQ(imrx, mrx operand.Op) { ctx.MOVQ(imrx, mrx) } + +// MOVSD: Move Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVSD xmm xmm +// MOVSD m64 xmm +// MOVSD xmm m64 +// Construct and append a MOVSD instruction to the active function. +func (c *Context) MOVSD(mx, mx1 operand.Op) { + if inst, err := x86.MOVSD(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVSD: Move Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVSD xmm xmm +// MOVSD m64 xmm +// MOVSD xmm m64 +// Construct and append a MOVSD instruction to the active function. +// Operates on the global context. +func MOVSD(mx, mx1 operand.Op) { ctx.MOVSD(mx, mx1) } + +// MOVSHDUP: Move Packed Single-FP High and Duplicate. +// +// Forms: +// +// MOVSHDUP xmm xmm +// MOVSHDUP m128 xmm +// Construct and append a MOVSHDUP instruction to the active function. +func (c *Context) MOVSHDUP(mx, x operand.Op) { + if inst, err := x86.MOVSHDUP(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVSHDUP: Move Packed Single-FP High and Duplicate. +// +// Forms: +// +// MOVSHDUP xmm xmm +// MOVSHDUP m128 xmm +// Construct and append a MOVSHDUP instruction to the active function. +// Operates on the global context. +func MOVSHDUP(mx, x operand.Op) { ctx.MOVSHDUP(mx, x) } + +// MOVSLDUP: Move Packed Single-FP Low and Duplicate. +// +// Forms: +// +// MOVSLDUP xmm xmm +// MOVSLDUP m128 xmm +// Construct and append a MOVSLDUP instruction to the active function. +func (c *Context) MOVSLDUP(mx, x operand.Op) { + if inst, err := x86.MOVSLDUP(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVSLDUP: Move Packed Single-FP Low and Duplicate. +// +// Forms: +// +// MOVSLDUP xmm xmm +// MOVSLDUP m128 xmm +// Construct and append a MOVSLDUP instruction to the active function. +// Operates on the global context. +func MOVSLDUP(mx, x operand.Op) { ctx.MOVSLDUP(mx, x) } + +// MOVSS: Move Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVSS xmm xmm +// MOVSS m32 xmm +// MOVSS xmm m32 +// Construct and append a MOVSS instruction to the active function. 
+func (c *Context) MOVSS(mx, mx1 operand.Op) { + if inst, err := x86.MOVSS(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVSS: Move Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVSS xmm xmm +// MOVSS m32 xmm +// MOVSS xmm m32 +// Construct and append a MOVSS instruction to the active function. +// Operates on the global context. +func MOVSS(mx, mx1 operand.Op) { ctx.MOVSS(mx, mx1) } + +// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MOVUPD xmm xmm +// MOVUPD m128 xmm +// MOVUPD xmm m128 +// Construct and append a MOVUPD instruction to the active function. +func (c *Context) MOVUPD(mx, mx1 operand.Op) { + if inst, err := x86.MOVUPD(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MOVUPD xmm xmm +// MOVUPD m128 xmm +// MOVUPD xmm m128 +// Construct and append a MOVUPD instruction to the active function. +// Operates on the global context. +func MOVUPD(mx, mx1 operand.Op) { ctx.MOVUPD(mx, mx1) } + +// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVUPS xmm xmm +// MOVUPS m128 xmm +// MOVUPS xmm m128 +// Construct and append a MOVUPS instruction to the active function. +func (c *Context) MOVUPS(mx, mx1 operand.Op) { + if inst, err := x86.MOVUPS(mx, mx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVUPS xmm xmm +// MOVUPS m128 xmm +// MOVUPS xmm m128 +// Construct and append a MOVUPS instruction to the active function. +// Operates on the global context. +func MOVUPS(mx, mx1 operand.Op) { ctx.MOVUPS(mx, mx1) } + +// MOVW: Move. +// +// Forms: +// +// MOVW imm16 r16 +// MOVW r16 r16 +// MOVW m16 r16 +// MOVW imm16 m16 +// MOVW r16 m16 +// Construct and append a MOVW instruction to the active function. +func (c *Context) MOVW(imr, mr operand.Op) { + if inst, err := x86.MOVW(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVW: Move. +// +// Forms: +// +// MOVW imm16 r16 +// MOVW r16 r16 +// MOVW m16 r16 +// MOVW imm16 m16 +// MOVW r16 m16 +// Construct and append a MOVW instruction to the active function. +// Operates on the global context. +func MOVW(imr, mr operand.Op) { ctx.MOVW(imr, mr) } + +// MOVWLSX: Move with Sign-Extension. +// +// Forms: +// +// MOVWLSX r16 r32 +// MOVWLSX m16 r32 +// Construct and append a MOVWLSX instruction to the active function. +func (c *Context) MOVWLSX(mr, r operand.Op) { + if inst, err := x86.MOVWLSX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVWLSX: Move with Sign-Extension. +// +// Forms: +// +// MOVWLSX r16 r32 +// MOVWLSX m16 r32 +// Construct and append a MOVWLSX instruction to the active function. +// Operates on the global context. +func MOVWLSX(mr, r operand.Op) { ctx.MOVWLSX(mr, r) } + +// MOVWLZX: Move with Zero-Extend. +// +// Forms: +// +// MOVWLZX r16 r32 +// MOVWLZX m16 r32 +// Construct and append a MOVWLZX instruction to the active function. +func (c *Context) MOVWLZX(mr, r operand.Op) { + if inst, err := x86.MOVWLZX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVWLZX: Move with Zero-Extend. 
+// +// Forms: +// +// MOVWLZX r16 r32 +// MOVWLZX m16 r32 +// Construct and append a MOVWLZX instruction to the active function. +// Operates on the global context. +func MOVWLZX(mr, r operand.Op) { ctx.MOVWLZX(mr, r) } + +// MOVWQSX: Move with Sign-Extension. +// +// Forms: +// +// MOVWQSX r16 r64 +// MOVWQSX m16 r64 +// Construct and append a MOVWQSX instruction to the active function. +func (c *Context) MOVWQSX(mr, r operand.Op) { + if inst, err := x86.MOVWQSX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVWQSX: Move with Sign-Extension. +// +// Forms: +// +// MOVWQSX r16 r64 +// MOVWQSX m16 r64 +// Construct and append a MOVWQSX instruction to the active function. +// Operates on the global context. +func MOVWQSX(mr, r operand.Op) { ctx.MOVWQSX(mr, r) } + +// MOVWQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVWQZX r16 r64 +// MOVWQZX m16 r64 +// Construct and append a MOVWQZX instruction to the active function. +func (c *Context) MOVWQZX(mr, r operand.Op) { + if inst, err := x86.MOVWQZX(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MOVWQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVWQZX r16 r64 +// MOVWQZX m16 r64 +// Construct and append a MOVWQZX instruction to the active function. +// Operates on the global context. +func MOVWQZX(mr, r operand.Op) { ctx.MOVWQZX(mr, r) } + +// MPSADBW: Compute Multiple Packed Sums of Absolute Difference. +// +// Forms: +// +// MPSADBW imm8 xmm xmm +// MPSADBW imm8 m128 xmm +// Construct and append a MPSADBW instruction to the active function. +func (c *Context) MPSADBW(i, mx, x operand.Op) { + if inst, err := x86.MPSADBW(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MPSADBW: Compute Multiple Packed Sums of Absolute Difference. +// +// Forms: +// +// MPSADBW imm8 xmm xmm +// MPSADBW imm8 m128 xmm +// Construct and append a MPSADBW instruction to the active function. +// Operates on the global context. +func MPSADBW(i, mx, x operand.Op) { ctx.MPSADBW(i, mx, x) } + +// MULB: Unsigned Multiply. +// +// Forms: +// +// MULB r8 +// MULB m8 +// Construct and append a MULB instruction to the active function. +func (c *Context) MULB(mr operand.Op) { + if inst, err := x86.MULB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULB: Unsigned Multiply. +// +// Forms: +// +// MULB r8 +// MULB m8 +// Construct and append a MULB instruction to the active function. +// Operates on the global context. +func MULB(mr operand.Op) { ctx.MULB(mr) } + +// MULL: Unsigned Multiply. +// +// Forms: +// +// MULL r32 +// MULL m32 +// Construct and append a MULL instruction to the active function. +func (c *Context) MULL(mr operand.Op) { + if inst, err := x86.MULL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULL: Unsigned Multiply. +// +// Forms: +// +// MULL r32 +// MULL m32 +// Construct and append a MULL instruction to the active function. +// Operates on the global context. +func MULL(mr operand.Op) { ctx.MULL(mr) } + +// MULPD: Multiply Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MULPD xmm xmm +// MULPD m128 xmm +// Construct and append a MULPD instruction to the active function. +func (c *Context) MULPD(mx, x operand.Op) { + if inst, err := x86.MULPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULPD: Multiply Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// MULPD xmm xmm +// MULPD m128 xmm +// Construct and append a MULPD instruction to the active function. +// Operates on the global context. +func MULPD(mx, x operand.Op) { ctx.MULPD(mx, x) } + +// MULPS: Multiply Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MULPS xmm xmm +// MULPS m128 xmm +// Construct and append a MULPS instruction to the active function. +func (c *Context) MULPS(mx, x operand.Op) { + if inst, err := x86.MULPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULPS: Multiply Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MULPS xmm xmm +// MULPS m128 xmm +// Construct and append a MULPS instruction to the active function. +// Operates on the global context. +func MULPS(mx, x operand.Op) { ctx.MULPS(mx, x) } + +// MULQ: Unsigned Multiply. +// +// Forms: +// +// MULQ r64 +// MULQ m64 +// Construct and append a MULQ instruction to the active function. +func (c *Context) MULQ(mr operand.Op) { + if inst, err := x86.MULQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULQ: Unsigned Multiply. +// +// Forms: +// +// MULQ r64 +// MULQ m64 +// Construct and append a MULQ instruction to the active function. +// Operates on the global context. +func MULQ(mr operand.Op) { ctx.MULQ(mr) } + +// MULSD: Multiply Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// MULSD xmm xmm +// MULSD m64 xmm +// Construct and append a MULSD instruction to the active function. +func (c *Context) MULSD(mx, x operand.Op) { + if inst, err := x86.MULSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULSD: Multiply Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// MULSD xmm xmm +// MULSD m64 xmm +// Construct and append a MULSD instruction to the active function. +// Operates on the global context. +func MULSD(mx, x operand.Op) { ctx.MULSD(mx, x) } + +// MULSS: Multiply Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// MULSS xmm xmm +// MULSS m32 xmm +// Construct and append a MULSS instruction to the active function. +func (c *Context) MULSS(mx, x operand.Op) { + if inst, err := x86.MULSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULSS: Multiply Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// MULSS xmm xmm +// MULSS m32 xmm +// Construct and append a MULSS instruction to the active function. +// Operates on the global context. +func MULSS(mx, x operand.Op) { ctx.MULSS(mx, x) } + +// MULW: Unsigned Multiply. +// +// Forms: +// +// MULW r16 +// MULW m16 +// Construct and append a MULW instruction to the active function. +func (c *Context) MULW(mr operand.Op) { + if inst, err := x86.MULW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULW: Unsigned Multiply. +// +// Forms: +// +// MULW r16 +// MULW m16 +// Construct and append a MULW instruction to the active function. +// Operates on the global context. +func MULW(mr operand.Op) { ctx.MULW(mr) } + +// MULXL: Unsigned Multiply Without Affecting Flags. +// +// Forms: +// +// MULXL r32 r32 r32 +// MULXL m32 r32 r32 +// Construct and append a MULXL instruction to the active function. +func (c *Context) MULXL(mr, r, r1 operand.Op) { + if inst, err := x86.MULXL(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULXL: Unsigned Multiply Without Affecting Flags. 
+// +// Forms: +// +// MULXL r32 r32 r32 +// MULXL m32 r32 r32 +// Construct and append a MULXL instruction to the active function. +// Operates on the global context. +func MULXL(mr, r, r1 operand.Op) { ctx.MULXL(mr, r, r1) } + +// MULXQ: Unsigned Multiply Without Affecting Flags. +// +// Forms: +// +// MULXQ r64 r64 r64 +// MULXQ m64 r64 r64 +// Construct and append a MULXQ instruction to the active function. +func (c *Context) MULXQ(mr, r, r1 operand.Op) { + if inst, err := x86.MULXQ(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MULXQ: Unsigned Multiply Without Affecting Flags. +// +// Forms: +// +// MULXQ r64 r64 r64 +// MULXQ m64 r64 r64 +// Construct and append a MULXQ instruction to the active function. +// Operates on the global context. +func MULXQ(mr, r, r1 operand.Op) { ctx.MULXQ(mr, r, r1) } + +// MWAIT: Monitor Wait. +// +// Forms: +// +// MWAIT +// Construct and append a MWAIT instruction to the active function. +func (c *Context) MWAIT() { + if inst, err := x86.MWAIT(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// MWAIT: Monitor Wait. +// +// Forms: +// +// MWAIT +// Construct and append a MWAIT instruction to the active function. +// Operates on the global context. +func MWAIT() { ctx.MWAIT() } + +// NEGB: Two's Complement Negation. +// +// Forms: +// +// NEGB r8 +// NEGB m8 +// Construct and append a NEGB instruction to the active function. +func (c *Context) NEGB(mr operand.Op) { + if inst, err := x86.NEGB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NEGB: Two's Complement Negation. +// +// Forms: +// +// NEGB r8 +// NEGB m8 +// Construct and append a NEGB instruction to the active function. +// Operates on the global context. +func NEGB(mr operand.Op) { ctx.NEGB(mr) } + +// NEGL: Two's Complement Negation. +// +// Forms: +// +// NEGL r32 +// NEGL m32 +// Construct and append a NEGL instruction to the active function. +func (c *Context) NEGL(mr operand.Op) { + if inst, err := x86.NEGL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NEGL: Two's Complement Negation. +// +// Forms: +// +// NEGL r32 +// NEGL m32 +// Construct and append a NEGL instruction to the active function. +// Operates on the global context. +func NEGL(mr operand.Op) { ctx.NEGL(mr) } + +// NEGQ: Two's Complement Negation. +// +// Forms: +// +// NEGQ r64 +// NEGQ m64 +// Construct and append a NEGQ instruction to the active function. +func (c *Context) NEGQ(mr operand.Op) { + if inst, err := x86.NEGQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NEGQ: Two's Complement Negation. +// +// Forms: +// +// NEGQ r64 +// NEGQ m64 +// Construct and append a NEGQ instruction to the active function. +// Operates on the global context. +func NEGQ(mr operand.Op) { ctx.NEGQ(mr) } + +// NEGW: Two's Complement Negation. +// +// Forms: +// +// NEGW r16 +// NEGW m16 +// Construct and append a NEGW instruction to the active function. +func (c *Context) NEGW(mr operand.Op) { + if inst, err := x86.NEGW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NEGW: Two's Complement Negation. +// +// Forms: +// +// NEGW r16 +// NEGW m16 +// Construct and append a NEGW instruction to the active function. +// Operates on the global context. +func NEGW(mr operand.Op) { ctx.NEGW(mr) } + +// NOP: No Operation. +// +// Forms: +// +// NOP +// Construct and append a NOP instruction to the active function. 
+func (c *Context) NOP() { + if inst, err := x86.NOP(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NOP: No Operation. +// +// Forms: +// +// NOP +// Construct and append a NOP instruction to the active function. +// Operates on the global context. +func NOP() { ctx.NOP() } + +// NOTB: One's Complement Negation. +// +// Forms: +// +// NOTB r8 +// NOTB m8 +// Construct and append a NOTB instruction to the active function. +func (c *Context) NOTB(mr operand.Op) { + if inst, err := x86.NOTB(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NOTB: One's Complement Negation. +// +// Forms: +// +// NOTB r8 +// NOTB m8 +// Construct and append a NOTB instruction to the active function. +// Operates on the global context. +func NOTB(mr operand.Op) { ctx.NOTB(mr) } + +// NOTL: One's Complement Negation. +// +// Forms: +// +// NOTL r32 +// NOTL m32 +// Construct and append a NOTL instruction to the active function. +func (c *Context) NOTL(mr operand.Op) { + if inst, err := x86.NOTL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NOTL: One's Complement Negation. +// +// Forms: +// +// NOTL r32 +// NOTL m32 +// Construct and append a NOTL instruction to the active function. +// Operates on the global context. +func NOTL(mr operand.Op) { ctx.NOTL(mr) } + +// NOTQ: One's Complement Negation. +// +// Forms: +// +// NOTQ r64 +// NOTQ m64 +// Construct and append a NOTQ instruction to the active function. +func (c *Context) NOTQ(mr operand.Op) { + if inst, err := x86.NOTQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NOTQ: One's Complement Negation. +// +// Forms: +// +// NOTQ r64 +// NOTQ m64 +// Construct and append a NOTQ instruction to the active function. +// Operates on the global context. +func NOTQ(mr operand.Op) { ctx.NOTQ(mr) } + +// NOTW: One's Complement Negation. +// +// Forms: +// +// NOTW r16 +// NOTW m16 +// Construct and append a NOTW instruction to the active function. +func (c *Context) NOTW(mr operand.Op) { + if inst, err := x86.NOTW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// NOTW: One's Complement Negation. +// +// Forms: +// +// NOTW r16 +// NOTW m16 +// Construct and append a NOTW instruction to the active function. +// Operates on the global context. +func NOTW(mr operand.Op) { ctx.NOTW(mr) } + +// ORB: Logical Inclusive OR. +// +// Forms: +// +// ORB imm8 al +// ORB imm8 r8 +// ORB r8 r8 +// ORB m8 r8 +// ORB imm8 m8 +// ORB r8 m8 +// Construct and append a ORB instruction to the active function. +func (c *Context) ORB(imr, amr operand.Op) { + if inst, err := x86.ORB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ORB: Logical Inclusive OR. +// +// Forms: +// +// ORB imm8 al +// ORB imm8 r8 +// ORB r8 r8 +// ORB m8 r8 +// ORB imm8 m8 +// ORB r8 m8 +// Construct and append a ORB instruction to the active function. +// Operates on the global context. +func ORB(imr, amr operand.Op) { ctx.ORB(imr, amr) } + +// ORL: Logical Inclusive OR. +// +// Forms: +// +// ORL imm32 eax +// ORL imm8 r32 +// ORL imm32 r32 +// ORL r32 r32 +// ORL m32 r32 +// ORL imm8 m32 +// ORL imm32 m32 +// ORL r32 m32 +// Construct and append a ORL instruction to the active function. +func (c *Context) ORL(imr, emr operand.Op) { + if inst, err := x86.ORL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ORL: Logical Inclusive OR. 
+// +// Forms: +// +// ORL imm32 eax +// ORL imm8 r32 +// ORL imm32 r32 +// ORL r32 r32 +// ORL m32 r32 +// ORL imm8 m32 +// ORL imm32 m32 +// ORL r32 m32 +// Construct and append a ORL instruction to the active function. +// Operates on the global context. +func ORL(imr, emr operand.Op) { ctx.ORL(imr, emr) } + +// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values. +// +// Forms: +// +// ORPD xmm xmm +// ORPD m128 xmm +// Construct and append a ORPD instruction to the active function. +func (c *Context) ORPD(mx, x operand.Op) { + if inst, err := x86.ORPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values. +// +// Forms: +// +// ORPD xmm xmm +// ORPD m128 xmm +// Construct and append a ORPD instruction to the active function. +// Operates on the global context. +func ORPD(mx, x operand.Op) { ctx.ORPD(mx, x) } + +// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values. +// +// Forms: +// +// ORPS xmm xmm +// ORPS m128 xmm +// Construct and append a ORPS instruction to the active function. +func (c *Context) ORPS(mx, x operand.Op) { + if inst, err := x86.ORPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values. +// +// Forms: +// +// ORPS xmm xmm +// ORPS m128 xmm +// Construct and append a ORPS instruction to the active function. +// Operates on the global context. +func ORPS(mx, x operand.Op) { ctx.ORPS(mx, x) } + +// ORQ: Logical Inclusive OR. +// +// Forms: +// +// ORQ imm32 rax +// ORQ imm8 r64 +// ORQ imm32 r64 +// ORQ r64 r64 +// ORQ m64 r64 +// ORQ imm8 m64 +// ORQ imm32 m64 +// ORQ r64 m64 +// Construct and append a ORQ instruction to the active function. +func (c *Context) ORQ(imr, mr operand.Op) { + if inst, err := x86.ORQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ORQ: Logical Inclusive OR. +// +// Forms: +// +// ORQ imm32 rax +// ORQ imm8 r64 +// ORQ imm32 r64 +// ORQ r64 r64 +// ORQ m64 r64 +// ORQ imm8 m64 +// ORQ imm32 m64 +// ORQ r64 m64 +// Construct and append a ORQ instruction to the active function. +// Operates on the global context. +func ORQ(imr, mr operand.Op) { ctx.ORQ(imr, mr) } + +// ORW: Logical Inclusive OR. +// +// Forms: +// +// ORW imm16 ax +// ORW imm8 r16 +// ORW imm16 r16 +// ORW r16 r16 +// ORW m16 r16 +// ORW imm8 m16 +// ORW imm16 m16 +// ORW r16 m16 +// Construct and append a ORW instruction to the active function. +func (c *Context) ORW(imr, amr operand.Op) { + if inst, err := x86.ORW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ORW: Logical Inclusive OR. +// +// Forms: +// +// ORW imm16 ax +// ORW imm8 r16 +// ORW imm16 r16 +// ORW r16 r16 +// ORW m16 r16 +// ORW imm8 m16 +// ORW imm16 m16 +// ORW r16 m16 +// Construct and append a ORW instruction to the active function. +// Operates on the global context. +func ORW(imr, amr operand.Op) { ctx.ORW(imr, amr) } + +// PABSB: Packed Absolute Value of Byte Integers. +// +// Forms: +// +// PABSB xmm xmm +// PABSB m128 xmm +// Construct and append a PABSB instruction to the active function. +func (c *Context) PABSB(mx, x operand.Op) { + if inst, err := x86.PABSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PABSB: Packed Absolute Value of Byte Integers. 
+// +// Forms: +// +// PABSB xmm xmm +// PABSB m128 xmm +// Construct and append a PABSB instruction to the active function. +// Operates on the global context. +func PABSB(mx, x operand.Op) { ctx.PABSB(mx, x) } + +// PABSD: Packed Absolute Value of Doubleword Integers. +// +// Forms: +// +// PABSD xmm xmm +// PABSD m128 xmm +// Construct and append a PABSD instruction to the active function. +func (c *Context) PABSD(mx, x operand.Op) { + if inst, err := x86.PABSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PABSD: Packed Absolute Value of Doubleword Integers. +// +// Forms: +// +// PABSD xmm xmm +// PABSD m128 xmm +// Construct and append a PABSD instruction to the active function. +// Operates on the global context. +func PABSD(mx, x operand.Op) { ctx.PABSD(mx, x) } + +// PABSW: Packed Absolute Value of Word Integers. +// +// Forms: +// +// PABSW xmm xmm +// PABSW m128 xmm +// Construct and append a PABSW instruction to the active function. +func (c *Context) PABSW(mx, x operand.Op) { + if inst, err := x86.PABSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PABSW: Packed Absolute Value of Word Integers. +// +// Forms: +// +// PABSW xmm xmm +// PABSW m128 xmm +// Construct and append a PABSW instruction to the active function. +// Operates on the global context. +func PABSW(mx, x operand.Op) { ctx.PABSW(mx, x) } + +// PACKSSLW: Pack Doublewords into Words with Signed Saturation. +// +// Forms: +// +// PACKSSLW xmm xmm +// PACKSSLW m128 xmm +// Construct and append a PACKSSLW instruction to the active function. +func (c *Context) PACKSSLW(mx, x operand.Op) { + if inst, err := x86.PACKSSLW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PACKSSLW: Pack Doublewords into Words with Signed Saturation. +// +// Forms: +// +// PACKSSLW xmm xmm +// PACKSSLW m128 xmm +// Construct and append a PACKSSLW instruction to the active function. +// Operates on the global context. +func PACKSSLW(mx, x operand.Op) { ctx.PACKSSLW(mx, x) } + +// PACKSSWB: Pack Words into Bytes with Signed Saturation. +// +// Forms: +// +// PACKSSWB xmm xmm +// PACKSSWB m128 xmm +// Construct and append a PACKSSWB instruction to the active function. +func (c *Context) PACKSSWB(mx, x operand.Op) { + if inst, err := x86.PACKSSWB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PACKSSWB: Pack Words into Bytes with Signed Saturation. +// +// Forms: +// +// PACKSSWB xmm xmm +// PACKSSWB m128 xmm +// Construct and append a PACKSSWB instruction to the active function. +// Operates on the global context. +func PACKSSWB(mx, x operand.Op) { ctx.PACKSSWB(mx, x) } + +// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation. +// +// Forms: +// +// PACKUSDW xmm xmm +// PACKUSDW m128 xmm +// Construct and append a PACKUSDW instruction to the active function. +func (c *Context) PACKUSDW(mx, x operand.Op) { + if inst, err := x86.PACKUSDW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation. +// +// Forms: +// +// PACKUSDW xmm xmm +// PACKUSDW m128 xmm +// Construct and append a PACKUSDW instruction to the active function. +// Operates on the global context. +func PACKUSDW(mx, x operand.Op) { ctx.PACKUSDW(mx, x) } + +// PACKUSWB: Pack Words into Bytes with Unsigned Saturation. 
+// +// Forms: +// +// PACKUSWB xmm xmm +// PACKUSWB m128 xmm +// Construct and append a PACKUSWB instruction to the active function. +func (c *Context) PACKUSWB(mx, x operand.Op) { + if inst, err := x86.PACKUSWB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PACKUSWB: Pack Words into Bytes with Unsigned Saturation. +// +// Forms: +// +// PACKUSWB xmm xmm +// PACKUSWB m128 xmm +// Construct and append a PACKUSWB instruction to the active function. +// Operates on the global context. +func PACKUSWB(mx, x operand.Op) { ctx.PACKUSWB(mx, x) } + +// PADDB: Add Packed Byte Integers. +// +// Forms: +// +// PADDB xmm xmm +// PADDB m128 xmm +// Construct and append a PADDB instruction to the active function. +func (c *Context) PADDB(mx, x operand.Op) { + if inst, err := x86.PADDB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDB: Add Packed Byte Integers. +// +// Forms: +// +// PADDB xmm xmm +// PADDB m128 xmm +// Construct and append a PADDB instruction to the active function. +// Operates on the global context. +func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) } + +// PADDD: Add Packed Doubleword Integers. +// +// Forms: +// +// PADDD xmm xmm +// PADDD m128 xmm +// Construct and append a PADDD instruction to the active function. +func (c *Context) PADDD(mx, x operand.Op) { + if inst, err := x86.PADDD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDD: Add Packed Doubleword Integers. +// +// Forms: +// +// PADDD xmm xmm +// PADDD m128 xmm +// Construct and append a PADDD instruction to the active function. +// Operates on the global context. +func PADDD(mx, x operand.Op) { ctx.PADDD(mx, x) } + +// PADDL: Add Packed Doubleword Integers. +// +// Forms: +// +// PADDL xmm xmm +// PADDL m128 xmm +// Construct and append a PADDL instruction to the active function. +func (c *Context) PADDL(mx, x operand.Op) { + if inst, err := x86.PADDL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDL: Add Packed Doubleword Integers. +// +// Forms: +// +// PADDL xmm xmm +// PADDL m128 xmm +// Construct and append a PADDL instruction to the active function. +// Operates on the global context. +func PADDL(mx, x operand.Op) { ctx.PADDL(mx, x) } + +// PADDQ: Add Packed Quadword Integers. +// +// Forms: +// +// PADDQ xmm xmm +// PADDQ m128 xmm +// Construct and append a PADDQ instruction to the active function. +func (c *Context) PADDQ(mx, x operand.Op) { + if inst, err := x86.PADDQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDQ: Add Packed Quadword Integers. +// +// Forms: +// +// PADDQ xmm xmm +// PADDQ m128 xmm +// Construct and append a PADDQ instruction to the active function. +// Operates on the global context. +func PADDQ(mx, x operand.Op) { ctx.PADDQ(mx, x) } + +// PADDSB: Add Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// PADDSB xmm xmm +// PADDSB m128 xmm +// Construct and append a PADDSB instruction to the active function. +func (c *Context) PADDSB(mx, x operand.Op) { + if inst, err := x86.PADDSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDSB: Add Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// PADDSB xmm xmm +// PADDSB m128 xmm +// Construct and append a PADDSB instruction to the active function. +// Operates on the global context. 
+func PADDSB(mx, x operand.Op) { ctx.PADDSB(mx, x) } + +// PADDSW: Add Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PADDSW xmm xmm +// PADDSW m128 xmm +// Construct and append a PADDSW instruction to the active function. +func (c *Context) PADDSW(mx, x operand.Op) { + if inst, err := x86.PADDSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDSW: Add Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PADDSW xmm xmm +// PADDSW m128 xmm +// Construct and append a PADDSW instruction to the active function. +// Operates on the global context. +func PADDSW(mx, x operand.Op) { ctx.PADDSW(mx, x) } + +// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// PADDUSB xmm xmm +// PADDUSB m128 xmm +// Construct and append a PADDUSB instruction to the active function. +func (c *Context) PADDUSB(mx, x operand.Op) { + if inst, err := x86.PADDUSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// PADDUSB xmm xmm +// PADDUSB m128 xmm +// Construct and append a PADDUSB instruction to the active function. +// Operates on the global context. +func PADDUSB(mx, x operand.Op) { ctx.PADDUSB(mx, x) } + +// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// PADDUSW xmm xmm +// PADDUSW m128 xmm +// Construct and append a PADDUSW instruction to the active function. +func (c *Context) PADDUSW(mx, x operand.Op) { + if inst, err := x86.PADDUSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// PADDUSW xmm xmm +// PADDUSW m128 xmm +// Construct and append a PADDUSW instruction to the active function. +// Operates on the global context. +func PADDUSW(mx, x operand.Op) { ctx.PADDUSW(mx, x) } + +// PADDW: Add Packed Word Integers. +// +// Forms: +// +// PADDW xmm xmm +// PADDW m128 xmm +// Construct and append a PADDW instruction to the active function. +func (c *Context) PADDW(mx, x operand.Op) { + if inst, err := x86.PADDW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PADDW: Add Packed Word Integers. +// +// Forms: +// +// PADDW xmm xmm +// PADDW m128 xmm +// Construct and append a PADDW instruction to the active function. +// Operates on the global context. +func PADDW(mx, x operand.Op) { ctx.PADDW(mx, x) } + +// PALIGNR: Packed Align Right. +// +// Forms: +// +// PALIGNR imm8 xmm xmm +// PALIGNR imm8 m128 xmm +// Construct and append a PALIGNR instruction to the active function. +func (c *Context) PALIGNR(i, mx, x operand.Op) { + if inst, err := x86.PALIGNR(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PALIGNR: Packed Align Right. +// +// Forms: +// +// PALIGNR imm8 xmm xmm +// PALIGNR imm8 m128 xmm +// Construct and append a PALIGNR instruction to the active function. +// Operates on the global context. +func PALIGNR(i, mx, x operand.Op) { ctx.PALIGNR(i, mx, x) } + +// PAND: Packed Bitwise Logical AND. +// +// Forms: +// +// PAND xmm xmm +// PAND m128 xmm +// Construct and append a PAND instruction to the active function. +func (c *Context) PAND(mx, x operand.Op) { + if inst, err := x86.PAND(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PAND: Packed Bitwise Logical AND. 
+// +// Forms: +// +// PAND xmm xmm +// PAND m128 xmm +// Construct and append a PAND instruction to the active function. +// Operates on the global context. +func PAND(mx, x operand.Op) { ctx.PAND(mx, x) } + +// PANDN: Packed Bitwise Logical AND NOT. +// +// Forms: +// +// PANDN xmm xmm +// PANDN m128 xmm +// Construct and append a PANDN instruction to the active function. +func (c *Context) PANDN(mx, x operand.Op) { + if inst, err := x86.PANDN(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PANDN: Packed Bitwise Logical AND NOT. +// +// Forms: +// +// PANDN xmm xmm +// PANDN m128 xmm +// Construct and append a PANDN instruction to the active function. +// Operates on the global context. +func PANDN(mx, x operand.Op) { ctx.PANDN(mx, x) } + +// PAUSE: Spin Loop Hint. +// +// Forms: +// +// PAUSE +// Construct and append a PAUSE instruction to the active function. +func (c *Context) PAUSE() { + if inst, err := x86.PAUSE(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PAUSE: Spin Loop Hint. +// +// Forms: +// +// PAUSE +// Construct and append a PAUSE instruction to the active function. +// Operates on the global context. +func PAUSE() { ctx.PAUSE() } + +// PAVGB: Average Packed Byte Integers. +// +// Forms: +// +// PAVGB xmm xmm +// PAVGB m128 xmm +// Construct and append a PAVGB instruction to the active function. +func (c *Context) PAVGB(mx, x operand.Op) { + if inst, err := x86.PAVGB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PAVGB: Average Packed Byte Integers. +// +// Forms: +// +// PAVGB xmm xmm +// PAVGB m128 xmm +// Construct and append a PAVGB instruction to the active function. +// Operates on the global context. +func PAVGB(mx, x operand.Op) { ctx.PAVGB(mx, x) } + +// PAVGW: Average Packed Word Integers. +// +// Forms: +// +// PAVGW xmm xmm +// PAVGW m128 xmm +// Construct and append a PAVGW instruction to the active function. +func (c *Context) PAVGW(mx, x operand.Op) { + if inst, err := x86.PAVGW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PAVGW: Average Packed Word Integers. +// +// Forms: +// +// PAVGW xmm xmm +// PAVGW m128 xmm +// Construct and append a PAVGW instruction to the active function. +// Operates on the global context. +func PAVGW(mx, x operand.Op) { ctx.PAVGW(mx, x) } + +// PBLENDVB: Variable Blend Packed Bytes. +// +// Forms: +// +// PBLENDVB xmm0 xmm xmm +// PBLENDVB xmm0 m128 xmm +// Construct and append a PBLENDVB instruction to the active function. +func (c *Context) PBLENDVB(x, mx, x1 operand.Op) { + if inst, err := x86.PBLENDVB(x, mx, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PBLENDVB: Variable Blend Packed Bytes. +// +// Forms: +// +// PBLENDVB xmm0 xmm xmm +// PBLENDVB xmm0 m128 xmm +// Construct and append a PBLENDVB instruction to the active function. +// Operates on the global context. +func PBLENDVB(x, mx, x1 operand.Op) { ctx.PBLENDVB(x, mx, x1) } + +// PBLENDW: Blend Packed Words. +// +// Forms: +// +// PBLENDW imm8 xmm xmm +// PBLENDW imm8 m128 xmm +// Construct and append a PBLENDW instruction to the active function. +func (c *Context) PBLENDW(i, mx, x operand.Op) { + if inst, err := x86.PBLENDW(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PBLENDW: Blend Packed Words. +// +// Forms: +// +// PBLENDW imm8 xmm xmm +// PBLENDW imm8 m128 xmm +// Construct and append a PBLENDW instruction to the active function. 
+// Operates on the global context. +func PBLENDW(i, mx, x operand.Op) { ctx.PBLENDW(i, mx, x) } + +// PCLMULQDQ: Carry-Less Quadword Multiplication. +// +// Forms: +// +// PCLMULQDQ imm8 xmm xmm +// PCLMULQDQ imm8 m128 xmm +// Construct and append a PCLMULQDQ instruction to the active function. +func (c *Context) PCLMULQDQ(i, mx, x operand.Op) { + if inst, err := x86.PCLMULQDQ(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCLMULQDQ: Carry-Less Quadword Multiplication. +// +// Forms: +// +// PCLMULQDQ imm8 xmm xmm +// PCLMULQDQ imm8 m128 xmm +// Construct and append a PCLMULQDQ instruction to the active function. +// Operates on the global context. +func PCLMULQDQ(i, mx, x operand.Op) { ctx.PCLMULQDQ(i, mx, x) } + +// PCMPEQB: Compare Packed Byte Data for Equality. +// +// Forms: +// +// PCMPEQB xmm xmm +// PCMPEQB m128 xmm +// Construct and append a PCMPEQB instruction to the active function. +func (c *Context) PCMPEQB(mx, x operand.Op) { + if inst, err := x86.PCMPEQB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPEQB: Compare Packed Byte Data for Equality. +// +// Forms: +// +// PCMPEQB xmm xmm +// PCMPEQB m128 xmm +// Construct and append a PCMPEQB instruction to the active function. +// Operates on the global context. +func PCMPEQB(mx, x operand.Op) { ctx.PCMPEQB(mx, x) } + +// PCMPEQL: Compare Packed Doubleword Data for Equality. +// +// Forms: +// +// PCMPEQL xmm xmm +// PCMPEQL m128 xmm +// Construct and append a PCMPEQL instruction to the active function. +func (c *Context) PCMPEQL(mx, x operand.Op) { + if inst, err := x86.PCMPEQL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPEQL: Compare Packed Doubleword Data for Equality. +// +// Forms: +// +// PCMPEQL xmm xmm +// PCMPEQL m128 xmm +// Construct and append a PCMPEQL instruction to the active function. +// Operates on the global context. +func PCMPEQL(mx, x operand.Op) { ctx.PCMPEQL(mx, x) } + +// PCMPEQQ: Compare Packed Quadword Data for Equality. +// +// Forms: +// +// PCMPEQQ xmm xmm +// PCMPEQQ m128 xmm +// Construct and append a PCMPEQQ instruction to the active function. +func (c *Context) PCMPEQQ(mx, x operand.Op) { + if inst, err := x86.PCMPEQQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPEQQ: Compare Packed Quadword Data for Equality. +// +// Forms: +// +// PCMPEQQ xmm xmm +// PCMPEQQ m128 xmm +// Construct and append a PCMPEQQ instruction to the active function. +// Operates on the global context. +func PCMPEQQ(mx, x operand.Op) { ctx.PCMPEQQ(mx, x) } + +// PCMPEQW: Compare Packed Word Data for Equality. +// +// Forms: +// +// PCMPEQW xmm xmm +// PCMPEQW m128 xmm +// Construct and append a PCMPEQW instruction to the active function. +func (c *Context) PCMPEQW(mx, x operand.Op) { + if inst, err := x86.PCMPEQW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPEQW: Compare Packed Word Data for Equality. +// +// Forms: +// +// PCMPEQW xmm xmm +// PCMPEQW m128 xmm +// Construct and append a PCMPEQW instruction to the active function. +// Operates on the global context. +func PCMPEQW(mx, x operand.Op) { ctx.PCMPEQW(mx, x) } + +// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index. +// +// Forms: +// +// PCMPESTRI imm8 xmm xmm +// PCMPESTRI imm8 m128 xmm +// Construct and append a PCMPESTRI instruction to the active function. 
+func (c *Context) PCMPESTRI(i, mx, x operand.Op) { + if inst, err := x86.PCMPESTRI(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index. +// +// Forms: +// +// PCMPESTRI imm8 xmm xmm +// PCMPESTRI imm8 m128 xmm +// Construct and append a PCMPESTRI instruction to the active function. +// Operates on the global context. +func PCMPESTRI(i, mx, x operand.Op) { ctx.PCMPESTRI(i, mx, x) } + +// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask. +// +// Forms: +// +// PCMPESTRM imm8 xmm xmm +// PCMPESTRM imm8 m128 xmm +// Construct and append a PCMPESTRM instruction to the active function. +func (c *Context) PCMPESTRM(i, mx, x operand.Op) { + if inst, err := x86.PCMPESTRM(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask. +// +// Forms: +// +// PCMPESTRM imm8 xmm xmm +// PCMPESTRM imm8 m128 xmm +// Construct and append a PCMPESTRM instruction to the active function. +// Operates on the global context. +func PCMPESTRM(i, mx, x operand.Op) { ctx.PCMPESTRM(i, mx, x) } + +// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than. +// +// Forms: +// +// PCMPGTB xmm xmm +// PCMPGTB m128 xmm +// Construct and append a PCMPGTB instruction to the active function. +func (c *Context) PCMPGTB(mx, x operand.Op) { + if inst, err := x86.PCMPGTB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than. +// +// Forms: +// +// PCMPGTB xmm xmm +// PCMPGTB m128 xmm +// Construct and append a PCMPGTB instruction to the active function. +// Operates on the global context. +func PCMPGTB(mx, x operand.Op) { ctx.PCMPGTB(mx, x) } + +// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than. +// +// Forms: +// +// PCMPGTL xmm xmm +// PCMPGTL m128 xmm +// Construct and append a PCMPGTL instruction to the active function. +func (c *Context) PCMPGTL(mx, x operand.Op) { + if inst, err := x86.PCMPGTL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than. +// +// Forms: +// +// PCMPGTL xmm xmm +// PCMPGTL m128 xmm +// Construct and append a PCMPGTL instruction to the active function. +// Operates on the global context. +func PCMPGTL(mx, x operand.Op) { ctx.PCMPGTL(mx, x) } + +// PCMPGTQ: Compare Packed Data for Greater Than. +// +// Forms: +// +// PCMPGTQ xmm xmm +// PCMPGTQ m128 xmm +// Construct and append a PCMPGTQ instruction to the active function. +func (c *Context) PCMPGTQ(mx, x operand.Op) { + if inst, err := x86.PCMPGTQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPGTQ: Compare Packed Data for Greater Than. +// +// Forms: +// +// PCMPGTQ xmm xmm +// PCMPGTQ m128 xmm +// Construct and append a PCMPGTQ instruction to the active function. +// Operates on the global context. +func PCMPGTQ(mx, x operand.Op) { ctx.PCMPGTQ(mx, x) } + +// PCMPGTW: Compare Packed Signed Word Integers for Greater Than. +// +// Forms: +// +// PCMPGTW xmm xmm +// PCMPGTW m128 xmm +// Construct and append a PCMPGTW instruction to the active function. +func (c *Context) PCMPGTW(mx, x operand.Op) { + if inst, err := x86.PCMPGTW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPGTW: Compare Packed Signed Word Integers for Greater Than. 
+// +// Forms: +// +// PCMPGTW xmm xmm +// PCMPGTW m128 xmm +// Construct and append a PCMPGTW instruction to the active function. +// Operates on the global context. +func PCMPGTW(mx, x operand.Op) { ctx.PCMPGTW(mx, x) } + +// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index. +// +// Forms: +// +// PCMPISTRI imm8 xmm xmm +// PCMPISTRI imm8 m128 xmm +// Construct and append a PCMPISTRI instruction to the active function. +func (c *Context) PCMPISTRI(i, mx, x operand.Op) { + if inst, err := x86.PCMPISTRI(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index. +// +// Forms: +// +// PCMPISTRI imm8 xmm xmm +// PCMPISTRI imm8 m128 xmm +// Construct and append a PCMPISTRI instruction to the active function. +// Operates on the global context. +func PCMPISTRI(i, mx, x operand.Op) { ctx.PCMPISTRI(i, mx, x) } + +// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask. +// +// Forms: +// +// PCMPISTRM imm8 xmm xmm +// PCMPISTRM imm8 m128 xmm +// Construct and append a PCMPISTRM instruction to the active function. +func (c *Context) PCMPISTRM(i, mx, x operand.Op) { + if inst, err := x86.PCMPISTRM(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask. +// +// Forms: +// +// PCMPISTRM imm8 xmm xmm +// PCMPISTRM imm8 m128 xmm +// Construct and append a PCMPISTRM instruction to the active function. +// Operates on the global context. +func PCMPISTRM(i, mx, x operand.Op) { ctx.PCMPISTRM(i, mx, x) } + +// PDEPL: Parallel Bits Deposit. +// +// Forms: +// +// PDEPL r32 r32 r32 +// PDEPL m32 r32 r32 +// Construct and append a PDEPL instruction to the active function. +func (c *Context) PDEPL(mr, r, r1 operand.Op) { + if inst, err := x86.PDEPL(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PDEPL: Parallel Bits Deposit. +// +// Forms: +// +// PDEPL r32 r32 r32 +// PDEPL m32 r32 r32 +// Construct and append a PDEPL instruction to the active function. +// Operates on the global context. +func PDEPL(mr, r, r1 operand.Op) { ctx.PDEPL(mr, r, r1) } + +// PDEPQ: Parallel Bits Deposit. +// +// Forms: +// +// PDEPQ r64 r64 r64 +// PDEPQ m64 r64 r64 +// Construct and append a PDEPQ instruction to the active function. +func (c *Context) PDEPQ(mr, r, r1 operand.Op) { + if inst, err := x86.PDEPQ(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PDEPQ: Parallel Bits Deposit. +// +// Forms: +// +// PDEPQ r64 r64 r64 +// PDEPQ m64 r64 r64 +// Construct and append a PDEPQ instruction to the active function. +// Operates on the global context. +func PDEPQ(mr, r, r1 operand.Op) { ctx.PDEPQ(mr, r, r1) } + +// PEXTL: Parallel Bits Extract. +// +// Forms: +// +// PEXTL r32 r32 r32 +// PEXTL m32 r32 r32 +// Construct and append a PEXTL instruction to the active function. +func (c *Context) PEXTL(mr, r, r1 operand.Op) { + if inst, err := x86.PEXTL(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PEXTL: Parallel Bits Extract. +// +// Forms: +// +// PEXTL r32 r32 r32 +// PEXTL m32 r32 r32 +// Construct and append a PEXTL instruction to the active function. +// Operates on the global context. +func PEXTL(mr, r, r1 operand.Op) { ctx.PEXTL(mr, r, r1) } + +// PEXTQ: Parallel Bits Extract. 
+// +// Forms: +// +// PEXTQ r64 r64 r64 +// PEXTQ m64 r64 r64 +// Construct and append a PEXTQ instruction to the active function. +func (c *Context) PEXTQ(mr, r, r1 operand.Op) { + if inst, err := x86.PEXTQ(mr, r, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PEXTQ: Parallel Bits Extract. +// +// Forms: +// +// PEXTQ r64 r64 r64 +// PEXTQ m64 r64 r64 +// Construct and append a PEXTQ instruction to the active function. +// Operates on the global context. +func PEXTQ(mr, r, r1 operand.Op) { ctx.PEXTQ(mr, r, r1) } + +// PEXTRB: Extract Byte. +// +// Forms: +// +// PEXTRB imm8 xmm r32 +// PEXTRB imm8 xmm m8 +// Construct and append a PEXTRB instruction to the active function. +func (c *Context) PEXTRB(i, x, mr operand.Op) { + if inst, err := x86.PEXTRB(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PEXTRB: Extract Byte. +// +// Forms: +// +// PEXTRB imm8 xmm r32 +// PEXTRB imm8 xmm m8 +// Construct and append a PEXTRB instruction to the active function. +// Operates on the global context. +func PEXTRB(i, x, mr operand.Op) { ctx.PEXTRB(i, x, mr) } + +// PEXTRD: Extract Doubleword. +// +// Forms: +// +// PEXTRD imm8 xmm r32 +// PEXTRD imm8 xmm m32 +// Construct and append a PEXTRD instruction to the active function. +func (c *Context) PEXTRD(i, x, mr operand.Op) { + if inst, err := x86.PEXTRD(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PEXTRD: Extract Doubleword. +// +// Forms: +// +// PEXTRD imm8 xmm r32 +// PEXTRD imm8 xmm m32 +// Construct and append a PEXTRD instruction to the active function. +// Operates on the global context. +func PEXTRD(i, x, mr operand.Op) { ctx.PEXTRD(i, x, mr) } + +// PEXTRQ: Extract Quadword. +// +// Forms: +// +// PEXTRQ imm8 xmm r64 +// PEXTRQ imm8 xmm m64 +// Construct and append a PEXTRQ instruction to the active function. +func (c *Context) PEXTRQ(i, x, mr operand.Op) { + if inst, err := x86.PEXTRQ(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PEXTRQ: Extract Quadword. +// +// Forms: +// +// PEXTRQ imm8 xmm r64 +// PEXTRQ imm8 xmm m64 +// Construct and append a PEXTRQ instruction to the active function. +// Operates on the global context. +func PEXTRQ(i, x, mr operand.Op) { ctx.PEXTRQ(i, x, mr) } + +// PEXTRW: Extract Word. +// +// Forms: +// +// PEXTRW imm8 xmm r32 +// PEXTRW imm8 xmm m16 +// Construct and append a PEXTRW instruction to the active function. +func (c *Context) PEXTRW(i, x, mr operand.Op) { + if inst, err := x86.PEXTRW(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PEXTRW: Extract Word. +// +// Forms: +// +// PEXTRW imm8 xmm r32 +// PEXTRW imm8 xmm m16 +// Construct and append a PEXTRW instruction to the active function. +// Operates on the global context. +func PEXTRW(i, x, mr operand.Op) { ctx.PEXTRW(i, x, mr) } + +// PHADDD: Packed Horizontal Add Doubleword Integer. +// +// Forms: +// +// PHADDD xmm xmm +// PHADDD m128 xmm +// Construct and append a PHADDD instruction to the active function. +func (c *Context) PHADDD(mx, x operand.Op) { + if inst, err := x86.PHADDD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHADDD: Packed Horizontal Add Doubleword Integer. +// +// Forms: +// +// PHADDD xmm xmm +// PHADDD m128 xmm +// Construct and append a PHADDD instruction to the active function. +// Operates on the global context. 
+func PHADDD(mx, x operand.Op) { ctx.PHADDD(mx, x) } + +// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PHADDSW xmm xmm +// PHADDSW m128 xmm +// Construct and append a PHADDSW instruction to the active function. +func (c *Context) PHADDSW(mx, x operand.Op) { + if inst, err := x86.PHADDSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PHADDSW xmm xmm +// PHADDSW m128 xmm +// Construct and append a PHADDSW instruction to the active function. +// Operates on the global context. +func PHADDSW(mx, x operand.Op) { ctx.PHADDSW(mx, x) } + +// PHADDW: Packed Horizontal Add Word Integers. +// +// Forms: +// +// PHADDW xmm xmm +// PHADDW m128 xmm +// Construct and append a PHADDW instruction to the active function. +func (c *Context) PHADDW(mx, x operand.Op) { + if inst, err := x86.PHADDW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHADDW: Packed Horizontal Add Word Integers. +// +// Forms: +// +// PHADDW xmm xmm +// PHADDW m128 xmm +// Construct and append a PHADDW instruction to the active function. +// Operates on the global context. +func PHADDW(mx, x operand.Op) { ctx.PHADDW(mx, x) } + +// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers. +// +// Forms: +// +// PHMINPOSUW xmm xmm +// PHMINPOSUW m128 xmm +// Construct and append a PHMINPOSUW instruction to the active function. +func (c *Context) PHMINPOSUW(mx, x operand.Op) { + if inst, err := x86.PHMINPOSUW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers. +// +// Forms: +// +// PHMINPOSUW xmm xmm +// PHMINPOSUW m128 xmm +// Construct and append a PHMINPOSUW instruction to the active function. +// Operates on the global context. +func PHMINPOSUW(mx, x operand.Op) { ctx.PHMINPOSUW(mx, x) } + +// PHSUBD: Packed Horizontal Subtract Doubleword Integers. +// +// Forms: +// +// PHSUBD xmm xmm +// PHSUBD m128 xmm +// Construct and append a PHSUBD instruction to the active function. +func (c *Context) PHSUBD(mx, x operand.Op) { + if inst, err := x86.PHSUBD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHSUBD: Packed Horizontal Subtract Doubleword Integers. +// +// Forms: +// +// PHSUBD xmm xmm +// PHSUBD m128 xmm +// Construct and append a PHSUBD instruction to the active function. +// Operates on the global context. +func PHSUBD(mx, x operand.Op) { ctx.PHSUBD(mx, x) } + +// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PHSUBSW xmm xmm +// PHSUBSW m128 xmm +// Construct and append a PHSUBSW instruction to the active function. +func (c *Context) PHSUBSW(mx, x operand.Op) { + if inst, err := x86.PHSUBSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PHSUBSW xmm xmm +// PHSUBSW m128 xmm +// Construct and append a PHSUBSW instruction to the active function. +// Operates on the global context. +func PHSUBSW(mx, x operand.Op) { ctx.PHSUBSW(mx, x) } + +// PHSUBW: Packed Horizontal Subtract Word Integers. +// +// Forms: +// +// PHSUBW xmm xmm +// PHSUBW m128 xmm +// Construct and append a PHSUBW instruction to the active function. 
+func (c *Context) PHSUBW(mx, x operand.Op) { + if inst, err := x86.PHSUBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PHSUBW: Packed Horizontal Subtract Word Integers. +// +// Forms: +// +// PHSUBW xmm xmm +// PHSUBW m128 xmm +// Construct and append a PHSUBW instruction to the active function. +// Operates on the global context. +func PHSUBW(mx, x operand.Op) { ctx.PHSUBW(mx, x) } + +// PINSRB: Insert Byte. +// +// Forms: +// +// PINSRB imm8 r32 xmm +// PINSRB imm8 m8 xmm +// Construct and append a PINSRB instruction to the active function. +func (c *Context) PINSRB(i, mr, x operand.Op) { + if inst, err := x86.PINSRB(i, mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PINSRB: Insert Byte. +// +// Forms: +// +// PINSRB imm8 r32 xmm +// PINSRB imm8 m8 xmm +// Construct and append a PINSRB instruction to the active function. +// Operates on the global context. +func PINSRB(i, mr, x operand.Op) { ctx.PINSRB(i, mr, x) } + +// PINSRD: Insert Doubleword. +// +// Forms: +// +// PINSRD imm8 r32 xmm +// PINSRD imm8 m32 xmm +// Construct and append a PINSRD instruction to the active function. +func (c *Context) PINSRD(i, mr, x operand.Op) { + if inst, err := x86.PINSRD(i, mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PINSRD: Insert Doubleword. +// +// Forms: +// +// PINSRD imm8 r32 xmm +// PINSRD imm8 m32 xmm +// Construct and append a PINSRD instruction to the active function. +// Operates on the global context. +func PINSRD(i, mr, x operand.Op) { ctx.PINSRD(i, mr, x) } + +// PINSRQ: Insert Quadword. +// +// Forms: +// +// PINSRQ imm8 r64 xmm +// PINSRQ imm8 m64 xmm +// Construct and append a PINSRQ instruction to the active function. +func (c *Context) PINSRQ(i, mr, x operand.Op) { + if inst, err := x86.PINSRQ(i, mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PINSRQ: Insert Quadword. +// +// Forms: +// +// PINSRQ imm8 r64 xmm +// PINSRQ imm8 m64 xmm +// Construct and append a PINSRQ instruction to the active function. +// Operates on the global context. +func PINSRQ(i, mr, x operand.Op) { ctx.PINSRQ(i, mr, x) } + +// PINSRW: Insert Word. +// +// Forms: +// +// PINSRW imm8 r32 xmm +// PINSRW imm8 m16 xmm +// Construct and append a PINSRW instruction to the active function. +func (c *Context) PINSRW(i, mr, x operand.Op) { + if inst, err := x86.PINSRW(i, mr, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PINSRW: Insert Word. +// +// Forms: +// +// PINSRW imm8 r32 xmm +// PINSRW imm8 m16 xmm +// Construct and append a PINSRW instruction to the active function. +// Operates on the global context. +func PINSRW(i, mr, x operand.Op) { ctx.PINSRW(i, mr, x) } + +// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers. +// +// Forms: +// +// PMADDUBSW xmm xmm +// PMADDUBSW m128 xmm +// Construct and append a PMADDUBSW instruction to the active function. +func (c *Context) PMADDUBSW(mx, x operand.Op) { + if inst, err := x86.PMADDUBSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers. +// +// Forms: +// +// PMADDUBSW xmm xmm +// PMADDUBSW m128 xmm +// Construct and append a PMADDUBSW instruction to the active function. +// Operates on the global context. +func PMADDUBSW(mx, x operand.Op) { ctx.PMADDUBSW(mx, x) } + +// PMADDWL: Multiply and Add Packed Signed Word Integers. 
+// +// Forms: +// +// PMADDWL xmm xmm +// PMADDWL m128 xmm +// Construct and append a PMADDWL instruction to the active function. +func (c *Context) PMADDWL(mx, x operand.Op) { + if inst, err := x86.PMADDWL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMADDWL: Multiply and Add Packed Signed Word Integers. +// +// Forms: +// +// PMADDWL xmm xmm +// PMADDWL m128 xmm +// Construct and append a PMADDWL instruction to the active function. +// Operates on the global context. +func PMADDWL(mx, x operand.Op) { ctx.PMADDWL(mx, x) } + +// PMAXSB: Maximum of Packed Signed Byte Integers. +// +// Forms: +// +// PMAXSB xmm xmm +// PMAXSB m128 xmm +// Construct and append a PMAXSB instruction to the active function. +func (c *Context) PMAXSB(mx, x operand.Op) { + if inst, err := x86.PMAXSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMAXSB: Maximum of Packed Signed Byte Integers. +// +// Forms: +// +// PMAXSB xmm xmm +// PMAXSB m128 xmm +// Construct and append a PMAXSB instruction to the active function. +// Operates on the global context. +func PMAXSB(mx, x operand.Op) { ctx.PMAXSB(mx, x) } + +// PMAXSD: Maximum of Packed Signed Doubleword Integers. +// +// Forms: +// +// PMAXSD xmm xmm +// PMAXSD m128 xmm +// Construct and append a PMAXSD instruction to the active function. +func (c *Context) PMAXSD(mx, x operand.Op) { + if inst, err := x86.PMAXSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMAXSD: Maximum of Packed Signed Doubleword Integers. +// +// Forms: +// +// PMAXSD xmm xmm +// PMAXSD m128 xmm +// Construct and append a PMAXSD instruction to the active function. +// Operates on the global context. +func PMAXSD(mx, x operand.Op) { ctx.PMAXSD(mx, x) } + +// PMAXSW: Maximum of Packed Signed Word Integers. +// +// Forms: +// +// PMAXSW xmm xmm +// PMAXSW m128 xmm +// Construct and append a PMAXSW instruction to the active function. +func (c *Context) PMAXSW(mx, x operand.Op) { + if inst, err := x86.PMAXSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMAXSW: Maximum of Packed Signed Word Integers. +// +// Forms: +// +// PMAXSW xmm xmm +// PMAXSW m128 xmm +// Construct and append a PMAXSW instruction to the active function. +// Operates on the global context. +func PMAXSW(mx, x operand.Op) { ctx.PMAXSW(mx, x) } + +// PMAXUB: Maximum of Packed Unsigned Byte Integers. +// +// Forms: +// +// PMAXUB xmm xmm +// PMAXUB m128 xmm +// Construct and append a PMAXUB instruction to the active function. +func (c *Context) PMAXUB(mx, x operand.Op) { + if inst, err := x86.PMAXUB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMAXUB: Maximum of Packed Unsigned Byte Integers. +// +// Forms: +// +// PMAXUB xmm xmm +// PMAXUB m128 xmm +// Construct and append a PMAXUB instruction to the active function. +// Operates on the global context. +func PMAXUB(mx, x operand.Op) { ctx.PMAXUB(mx, x) } + +// PMAXUD: Maximum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMAXUD xmm xmm +// PMAXUD m128 xmm +// Construct and append a PMAXUD instruction to the active function. +func (c *Context) PMAXUD(mx, x operand.Op) { + if inst, err := x86.PMAXUD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMAXUD: Maximum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMAXUD xmm xmm +// PMAXUD m128 xmm +// Construct and append a PMAXUD instruction to the active function. 
+// Operates on the global context. +func PMAXUD(mx, x operand.Op) { ctx.PMAXUD(mx, x) } + +// PMAXUW: Maximum of Packed Unsigned Word Integers. +// +// Forms: +// +// PMAXUW xmm xmm +// PMAXUW m128 xmm +// Construct and append a PMAXUW instruction to the active function. +func (c *Context) PMAXUW(mx, x operand.Op) { + if inst, err := x86.PMAXUW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMAXUW: Maximum of Packed Unsigned Word Integers. +// +// Forms: +// +// PMAXUW xmm xmm +// PMAXUW m128 xmm +// Construct and append a PMAXUW instruction to the active function. +// Operates on the global context. +func PMAXUW(mx, x operand.Op) { ctx.PMAXUW(mx, x) } + +// PMINSB: Minimum of Packed Signed Byte Integers. +// +// Forms: +// +// PMINSB xmm xmm +// PMINSB m128 xmm +// Construct and append a PMINSB instruction to the active function. +func (c *Context) PMINSB(mx, x operand.Op) { + if inst, err := x86.PMINSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMINSB: Minimum of Packed Signed Byte Integers. +// +// Forms: +// +// PMINSB xmm xmm +// PMINSB m128 xmm +// Construct and append a PMINSB instruction to the active function. +// Operates on the global context. +func PMINSB(mx, x operand.Op) { ctx.PMINSB(mx, x) } + +// PMINSD: Minimum of Packed Signed Doubleword Integers. +// +// Forms: +// +// PMINSD xmm xmm +// PMINSD m128 xmm +// Construct and append a PMINSD instruction to the active function. +func (c *Context) PMINSD(mx, x operand.Op) { + if inst, err := x86.PMINSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMINSD: Minimum of Packed Signed Doubleword Integers. +// +// Forms: +// +// PMINSD xmm xmm +// PMINSD m128 xmm +// Construct and append a PMINSD instruction to the active function. +// Operates on the global context. +func PMINSD(mx, x operand.Op) { ctx.PMINSD(mx, x) } + +// PMINSW: Minimum of Packed Signed Word Integers. +// +// Forms: +// +// PMINSW xmm xmm +// PMINSW m128 xmm +// Construct and append a PMINSW instruction to the active function. +func (c *Context) PMINSW(mx, x operand.Op) { + if inst, err := x86.PMINSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMINSW: Minimum of Packed Signed Word Integers. +// +// Forms: +// +// PMINSW xmm xmm +// PMINSW m128 xmm +// Construct and append a PMINSW instruction to the active function. +// Operates on the global context. +func PMINSW(mx, x operand.Op) { ctx.PMINSW(mx, x) } + +// PMINUB: Minimum of Packed Unsigned Byte Integers. +// +// Forms: +// +// PMINUB xmm xmm +// PMINUB m128 xmm +// Construct and append a PMINUB instruction to the active function. +func (c *Context) PMINUB(mx, x operand.Op) { + if inst, err := x86.PMINUB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMINUB: Minimum of Packed Unsigned Byte Integers. +// +// Forms: +// +// PMINUB xmm xmm +// PMINUB m128 xmm +// Construct and append a PMINUB instruction to the active function. +// Operates on the global context. +func PMINUB(mx, x operand.Op) { ctx.PMINUB(mx, x) } + +// PMINUD: Minimum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMINUD xmm xmm +// PMINUD m128 xmm +// Construct and append a PMINUD instruction to the active function. +func (c *Context) PMINUD(mx, x operand.Op) { + if inst, err := x86.PMINUD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMINUD: Minimum of Packed Unsigned Doubleword Integers. 
+// +// Forms: +// +// PMINUD xmm xmm +// PMINUD m128 xmm +// Construct and append a PMINUD instruction to the active function. +// Operates on the global context. +func PMINUD(mx, x operand.Op) { ctx.PMINUD(mx, x) } + +// PMINUW: Minimum of Packed Unsigned Word Integers. +// +// Forms: +// +// PMINUW xmm xmm +// PMINUW m128 xmm +// Construct and append a PMINUW instruction to the active function. +func (c *Context) PMINUW(mx, x operand.Op) { + if inst, err := x86.PMINUW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMINUW: Minimum of Packed Unsigned Word Integers. +// +// Forms: +// +// PMINUW xmm xmm +// PMINUW m128 xmm +// Construct and append a PMINUW instruction to the active function. +// Operates on the global context. +func PMINUW(mx, x operand.Op) { ctx.PMINUW(mx, x) } + +// PMOVMSKB: Move Byte Mask. +// +// Forms: +// +// PMOVMSKB xmm r32 +// Construct and append a PMOVMSKB instruction to the active function. +func (c *Context) PMOVMSKB(x, r operand.Op) { + if inst, err := x86.PMOVMSKB(x, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVMSKB: Move Byte Mask. +// +// Forms: +// +// PMOVMSKB xmm r32 +// Construct and append a PMOVMSKB instruction to the active function. +// Operates on the global context. +func PMOVMSKB(x, r operand.Op) { ctx.PMOVMSKB(x, r) } + +// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBD xmm xmm +// PMOVSXBD m32 xmm +// Construct and append a PMOVSXBD instruction to the active function. +func (c *Context) PMOVSXBD(mx, x operand.Op) { + if inst, err := x86.PMOVSXBD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBD xmm xmm +// PMOVSXBD m32 xmm +// Construct and append a PMOVSXBD instruction to the active function. +// Operates on the global context. +func PMOVSXBD(mx, x operand.Op) { ctx.PMOVSXBD(mx, x) } + +// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBQ xmm xmm +// PMOVSXBQ m16 xmm +// Construct and append a PMOVSXBQ instruction to the active function. +func (c *Context) PMOVSXBQ(mx, x operand.Op) { + if inst, err := x86.PMOVSXBQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBQ xmm xmm +// PMOVSXBQ m16 xmm +// Construct and append a PMOVSXBQ instruction to the active function. +// Operates on the global context. +func PMOVSXBQ(mx, x operand.Op) { ctx.PMOVSXBQ(mx, x) } + +// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBW xmm xmm +// PMOVSXBW m64 xmm +// Construct and append a PMOVSXBW instruction to the active function. +func (c *Context) PMOVSXBW(mx, x operand.Op) { + if inst, err := x86.PMOVSXBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBW xmm xmm +// PMOVSXBW m64 xmm +// Construct and append a PMOVSXBW instruction to the active function. +// Operates on the global context. +func PMOVSXBW(mx, x operand.Op) { ctx.PMOVSXBW(mx, x) } + +// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension. 
+// +// Forms: +// +// PMOVSXDQ xmm xmm +// PMOVSXDQ m64 xmm +// Construct and append a PMOVSXDQ instruction to the active function. +func (c *Context) PMOVSXDQ(mx, x operand.Op) { + if inst, err := x86.PMOVSXDQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXDQ xmm xmm +// PMOVSXDQ m64 xmm +// Construct and append a PMOVSXDQ instruction to the active function. +// Operates on the global context. +func PMOVSXDQ(mx, x operand.Op) { ctx.PMOVSXDQ(mx, x) } + +// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXWD xmm xmm +// PMOVSXWD m64 xmm +// Construct and append a PMOVSXWD instruction to the active function. +func (c *Context) PMOVSXWD(mx, x operand.Op) { + if inst, err := x86.PMOVSXWD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXWD xmm xmm +// PMOVSXWD m64 xmm +// Construct and append a PMOVSXWD instruction to the active function. +// Operates on the global context. +func PMOVSXWD(mx, x operand.Op) { ctx.PMOVSXWD(mx, x) } + +// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXWQ xmm xmm +// PMOVSXWQ m32 xmm +// Construct and append a PMOVSXWQ instruction to the active function. +func (c *Context) PMOVSXWQ(mx, x operand.Op) { + if inst, err := x86.PMOVSXWQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXWQ xmm xmm +// PMOVSXWQ m32 xmm +// Construct and append a PMOVSXWQ instruction to the active function. +// Operates on the global context. +func PMOVSXWQ(mx, x operand.Op) { ctx.PMOVSXWQ(mx, x) } + +// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBD xmm xmm +// PMOVZXBD m32 xmm +// Construct and append a PMOVZXBD instruction to the active function. +func (c *Context) PMOVZXBD(mx, x operand.Op) { + if inst, err := x86.PMOVZXBD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBD xmm xmm +// PMOVZXBD m32 xmm +// Construct and append a PMOVZXBD instruction to the active function. +// Operates on the global context. +func PMOVZXBD(mx, x operand.Op) { ctx.PMOVZXBD(mx, x) } + +// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBQ xmm xmm +// PMOVZXBQ m16 xmm +// Construct and append a PMOVZXBQ instruction to the active function. +func (c *Context) PMOVZXBQ(mx, x operand.Op) { + if inst, err := x86.PMOVZXBQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBQ xmm xmm +// PMOVZXBQ m16 xmm +// Construct and append a PMOVZXBQ instruction to the active function. +// Operates on the global context. +func PMOVZXBQ(mx, x operand.Op) { ctx.PMOVZXBQ(mx, x) } + +// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension. 
+// +// Forms: +// +// PMOVZXBW xmm xmm +// PMOVZXBW m64 xmm +// Construct and append a PMOVZXBW instruction to the active function. +func (c *Context) PMOVZXBW(mx, x operand.Op) { + if inst, err := x86.PMOVZXBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBW xmm xmm +// PMOVZXBW m64 xmm +// Construct and append a PMOVZXBW instruction to the active function. +// Operates on the global context. +func PMOVZXBW(mx, x operand.Op) { ctx.PMOVZXBW(mx, x) } + +// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXDQ xmm xmm +// PMOVZXDQ m64 xmm +// Construct and append a PMOVZXDQ instruction to the active function. +func (c *Context) PMOVZXDQ(mx, x operand.Op) { + if inst, err := x86.PMOVZXDQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXDQ xmm xmm +// PMOVZXDQ m64 xmm +// Construct and append a PMOVZXDQ instruction to the active function. +// Operates on the global context. +func PMOVZXDQ(mx, x operand.Op) { ctx.PMOVZXDQ(mx, x) } + +// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXWD xmm xmm +// PMOVZXWD m64 xmm +// Construct and append a PMOVZXWD instruction to the active function. +func (c *Context) PMOVZXWD(mx, x operand.Op) { + if inst, err := x86.PMOVZXWD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXWD xmm xmm +// PMOVZXWD m64 xmm +// Construct and append a PMOVZXWD instruction to the active function. +// Operates on the global context. +func PMOVZXWD(mx, x operand.Op) { ctx.PMOVZXWD(mx, x) } + +// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXWQ xmm xmm +// PMOVZXWQ m32 xmm +// Construct and append a PMOVZXWQ instruction to the active function. +func (c *Context) PMOVZXWQ(mx, x operand.Op) { + if inst, err := x86.PMOVZXWQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXWQ xmm xmm +// PMOVZXWQ m32 xmm +// Construct and append a PMOVZXWQ instruction to the active function. +// Operates on the global context. +func PMOVZXWQ(mx, x operand.Op) { ctx.PMOVZXWQ(mx, x) } + +// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result. +// +// Forms: +// +// PMULDQ xmm xmm +// PMULDQ m128 xmm +// Construct and append a PMULDQ instruction to the active function. +func (c *Context) PMULDQ(mx, x operand.Op) { + if inst, err := x86.PMULDQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result. +// +// Forms: +// +// PMULDQ xmm xmm +// PMULDQ m128 xmm +// Construct and append a PMULDQ instruction to the active function. +// Operates on the global context. +func PMULDQ(mx, x operand.Op) { ctx.PMULDQ(mx, x) } + +// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale. 
+// +// Forms: +// +// PMULHRSW xmm xmm +// PMULHRSW m128 xmm +// Construct and append a PMULHRSW instruction to the active function. +func (c *Context) PMULHRSW(mx, x operand.Op) { + if inst, err := x86.PMULHRSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale. +// +// Forms: +// +// PMULHRSW xmm xmm +// PMULHRSW m128 xmm +// Construct and append a PMULHRSW instruction to the active function. +// Operates on the global context. +func PMULHRSW(mx, x operand.Op) { ctx.PMULHRSW(mx, x) } + +// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result. +// +// Forms: +// +// PMULHUW xmm xmm +// PMULHUW m128 xmm +// Construct and append a PMULHUW instruction to the active function. +func (c *Context) PMULHUW(mx, x operand.Op) { + if inst, err := x86.PMULHUW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result. +// +// Forms: +// +// PMULHUW xmm xmm +// PMULHUW m128 xmm +// Construct and append a PMULHUW instruction to the active function. +// Operates on the global context. +func PMULHUW(mx, x operand.Op) { ctx.PMULHUW(mx, x) } + +// PMULHW: Multiply Packed Signed Word Integers and Store High Result. +// +// Forms: +// +// PMULHW xmm xmm +// PMULHW m128 xmm +// Construct and append a PMULHW instruction to the active function. +func (c *Context) PMULHW(mx, x operand.Op) { + if inst, err := x86.PMULHW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULHW: Multiply Packed Signed Word Integers and Store High Result. +// +// Forms: +// +// PMULHW xmm xmm +// PMULHW m128 xmm +// Construct and append a PMULHW instruction to the active function. +// Operates on the global context. +func PMULHW(mx, x operand.Op) { ctx.PMULHW(mx, x) } + +// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result. +// +// Forms: +// +// PMULLD xmm xmm +// PMULLD m128 xmm +// Construct and append a PMULLD instruction to the active function. +func (c *Context) PMULLD(mx, x operand.Op) { + if inst, err := x86.PMULLD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result. +// +// Forms: +// +// PMULLD xmm xmm +// PMULLD m128 xmm +// Construct and append a PMULLD instruction to the active function. +// Operates on the global context. +func PMULLD(mx, x operand.Op) { ctx.PMULLD(mx, x) } + +// PMULLW: Multiply Packed Signed Word Integers and Store Low Result. +// +// Forms: +// +// PMULLW xmm xmm +// PMULLW m128 xmm +// Construct and append a PMULLW instruction to the active function. +func (c *Context) PMULLW(mx, x operand.Op) { + if inst, err := x86.PMULLW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULLW: Multiply Packed Signed Word Integers and Store Low Result. +// +// Forms: +// +// PMULLW xmm xmm +// PMULLW m128 xmm +// Construct and append a PMULLW instruction to the active function. +// Operates on the global context. +func PMULLW(mx, x operand.Op) { ctx.PMULLW(mx, x) } + +// PMULULQ: Multiply Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMULULQ xmm xmm +// PMULULQ m128 xmm +// Construct and append a PMULULQ instruction to the active function. 
+func (c *Context) PMULULQ(mx, x operand.Op) { + if inst, err := x86.PMULULQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PMULULQ: Multiply Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMULULQ xmm xmm +// PMULULQ m128 xmm +// Construct and append a PMULULQ instruction to the active function. +// Operates on the global context. +func PMULULQ(mx, x operand.Op) { ctx.PMULULQ(mx, x) } + +// POPCNTL: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTL r32 r32 +// POPCNTL m32 r32 +// Construct and append a POPCNTL instruction to the active function. +func (c *Context) POPCNTL(mr, r operand.Op) { + if inst, err := x86.POPCNTL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// POPCNTL: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTL r32 r32 +// POPCNTL m32 r32 +// Construct and append a POPCNTL instruction to the active function. +// Operates on the global context. +func POPCNTL(mr, r operand.Op) { ctx.POPCNTL(mr, r) } + +// POPCNTQ: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTQ r64 r64 +// POPCNTQ m64 r64 +// Construct and append a POPCNTQ instruction to the active function. +func (c *Context) POPCNTQ(mr, r operand.Op) { + if inst, err := x86.POPCNTQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// POPCNTQ: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTQ r64 r64 +// POPCNTQ m64 r64 +// Construct and append a POPCNTQ instruction to the active function. +// Operates on the global context. +func POPCNTQ(mr, r operand.Op) { ctx.POPCNTQ(mr, r) } + +// POPCNTW: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTW r16 r16 +// POPCNTW m16 r16 +// Construct and append a POPCNTW instruction to the active function. +func (c *Context) POPCNTW(mr, r operand.Op) { + if inst, err := x86.POPCNTW(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// POPCNTW: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTW r16 r16 +// POPCNTW m16 r16 +// Construct and append a POPCNTW instruction to the active function. +// Operates on the global context. +func POPCNTW(mr, r operand.Op) { ctx.POPCNTW(mr, r) } + +// POPQ: Pop a Value from the Stack. +// +// Forms: +// +// POPQ r64 +// POPQ m64 +// Construct and append a POPQ instruction to the active function. +func (c *Context) POPQ(mr operand.Op) { + if inst, err := x86.POPQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// POPQ: Pop a Value from the Stack. +// +// Forms: +// +// POPQ r64 +// POPQ m64 +// Construct and append a POPQ instruction to the active function. +// Operates on the global context. +func POPQ(mr operand.Op) { ctx.POPQ(mr) } + +// POPW: Pop a Value from the Stack. +// +// Forms: +// +// POPW r16 +// POPW m16 +// Construct and append a POPW instruction to the active function. +func (c *Context) POPW(mr operand.Op) { + if inst, err := x86.POPW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// POPW: Pop a Value from the Stack. +// +// Forms: +// +// POPW r16 +// POPW m16 +// Construct and append a POPW instruction to the active function. +// Operates on the global context. +func POPW(mr operand.Op) { ctx.POPW(mr) } + +// POR: Packed Bitwise Logical OR. +// +// Forms: +// +// POR xmm xmm +// POR m128 xmm +// Construct and append a POR instruction to the active function. 
+func (c *Context) POR(mx, x operand.Op) { + if inst, err := x86.POR(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// POR: Packed Bitwise Logical OR. +// +// Forms: +// +// POR xmm xmm +// POR m128 xmm +// Construct and append a POR instruction to the active function. +// Operates on the global context. +func POR(mx, x operand.Op) { ctx.POR(mx, x) } + +// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint. +// +// Forms: +// +// PREFETCHNTA m8 +// Construct and append a PREFETCHNTA instruction to the active function. +func (c *Context) PREFETCHNTA(m operand.Op) { + if inst, err := x86.PREFETCHNTA(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint. +// +// Forms: +// +// PREFETCHNTA m8 +// Construct and append a PREFETCHNTA instruction to the active function. +// Operates on the global context. +func PREFETCHNTA(m operand.Op) { ctx.PREFETCHNTA(m) } + +// PREFETCHT0: Prefetch Data Into Caches using T0 Hint. +// +// Forms: +// +// PREFETCHT0 m8 +// Construct and append a PREFETCHT0 instruction to the active function. +func (c *Context) PREFETCHT0(m operand.Op) { + if inst, err := x86.PREFETCHT0(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PREFETCHT0: Prefetch Data Into Caches using T0 Hint. +// +// Forms: +// +// PREFETCHT0 m8 +// Construct and append a PREFETCHT0 instruction to the active function. +// Operates on the global context. +func PREFETCHT0(m operand.Op) { ctx.PREFETCHT0(m) } + +// PREFETCHT1: Prefetch Data Into Caches using T1 Hint. +// +// Forms: +// +// PREFETCHT1 m8 +// Construct and append a PREFETCHT1 instruction to the active function. +func (c *Context) PREFETCHT1(m operand.Op) { + if inst, err := x86.PREFETCHT1(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PREFETCHT1: Prefetch Data Into Caches using T1 Hint. +// +// Forms: +// +// PREFETCHT1 m8 +// Construct and append a PREFETCHT1 instruction to the active function. +// Operates on the global context. +func PREFETCHT1(m operand.Op) { ctx.PREFETCHT1(m) } + +// PREFETCHT2: Prefetch Data Into Caches using T2 Hint. +// +// Forms: +// +// PREFETCHT2 m8 +// Construct and append a PREFETCHT2 instruction to the active function. +func (c *Context) PREFETCHT2(m operand.Op) { + if inst, err := x86.PREFETCHT2(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PREFETCHT2: Prefetch Data Into Caches using T2 Hint. +// +// Forms: +// +// PREFETCHT2 m8 +// Construct and append a PREFETCHT2 instruction to the active function. +// Operates on the global context. +func PREFETCHT2(m operand.Op) { ctx.PREFETCHT2(m) } + +// PSADBW: Compute Sum of Absolute Differences. +// +// Forms: +// +// PSADBW xmm xmm +// PSADBW m128 xmm +// Construct and append a PSADBW instruction to the active function. +func (c *Context) PSADBW(mx, x operand.Op) { + if inst, err := x86.PSADBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSADBW: Compute Sum of Absolute Differences. +// +// Forms: +// +// PSADBW xmm xmm +// PSADBW m128 xmm +// Construct and append a PSADBW instruction to the active function. +// Operates on the global context. +func PSADBW(mx, x operand.Op) { ctx.PSADBW(mx, x) } + +// PSHUFB: Packed Shuffle Bytes. +// +// Forms: +// +// PSHUFB xmm xmm +// PSHUFB m128 xmm +// Construct and append a PSHUFB instruction to the active function. 
+func (c *Context) PSHUFB(mx, x operand.Op) { + if inst, err := x86.PSHUFB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSHUFB: Packed Shuffle Bytes. +// +// Forms: +// +// PSHUFB xmm xmm +// PSHUFB m128 xmm +// Construct and append a PSHUFB instruction to the active function. +// Operates on the global context. +func PSHUFB(mx, x operand.Op) { ctx.PSHUFB(mx, x) } + +// PSHUFD: Shuffle Packed Doublewords. +// +// Forms: +// +// PSHUFD imm8 xmm xmm +// PSHUFD imm8 m128 xmm +// Construct and append a PSHUFD instruction to the active function. +func (c *Context) PSHUFD(i, mx, x operand.Op) { + if inst, err := x86.PSHUFD(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSHUFD: Shuffle Packed Doublewords. +// +// Forms: +// +// PSHUFD imm8 xmm xmm +// PSHUFD imm8 m128 xmm +// Construct and append a PSHUFD instruction to the active function. +// Operates on the global context. +func PSHUFD(i, mx, x operand.Op) { ctx.PSHUFD(i, mx, x) } + +// PSHUFHW: Shuffle Packed High Words. +// +// Forms: +// +// PSHUFHW imm8 xmm xmm +// PSHUFHW imm8 m128 xmm +// Construct and append a PSHUFHW instruction to the active function. +func (c *Context) PSHUFHW(i, mx, x operand.Op) { + if inst, err := x86.PSHUFHW(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSHUFHW: Shuffle Packed High Words. +// +// Forms: +// +// PSHUFHW imm8 xmm xmm +// PSHUFHW imm8 m128 xmm +// Construct and append a PSHUFHW instruction to the active function. +// Operates on the global context. +func PSHUFHW(i, mx, x operand.Op) { ctx.PSHUFHW(i, mx, x) } + +// PSHUFL: Shuffle Packed Doublewords. +// +// Forms: +// +// PSHUFL imm8 xmm xmm +// PSHUFL imm8 m128 xmm +// Construct and append a PSHUFL instruction to the active function. +func (c *Context) PSHUFL(i, mx, x operand.Op) { + if inst, err := x86.PSHUFL(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSHUFL: Shuffle Packed Doublewords. +// +// Forms: +// +// PSHUFL imm8 xmm xmm +// PSHUFL imm8 m128 xmm +// Construct and append a PSHUFL instruction to the active function. +// Operates on the global context. +func PSHUFL(i, mx, x operand.Op) { ctx.PSHUFL(i, mx, x) } + +// PSHUFLW: Shuffle Packed Low Words. +// +// Forms: +// +// PSHUFLW imm8 xmm xmm +// PSHUFLW imm8 m128 xmm +// Construct and append a PSHUFLW instruction to the active function. +func (c *Context) PSHUFLW(i, mx, x operand.Op) { + if inst, err := x86.PSHUFLW(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSHUFLW: Shuffle Packed Low Words. +// +// Forms: +// +// PSHUFLW imm8 xmm xmm +// PSHUFLW imm8 m128 xmm +// Construct and append a PSHUFLW instruction to the active function. +// Operates on the global context. +func PSHUFLW(i, mx, x operand.Op) { ctx.PSHUFLW(i, mx, x) } + +// PSIGNB: Packed Sign of Byte Integers. +// +// Forms: +// +// PSIGNB xmm xmm +// PSIGNB m128 xmm +// Construct and append a PSIGNB instruction to the active function. +func (c *Context) PSIGNB(mx, x operand.Op) { + if inst, err := x86.PSIGNB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSIGNB: Packed Sign of Byte Integers. +// +// Forms: +// +// PSIGNB xmm xmm +// PSIGNB m128 xmm +// Construct and append a PSIGNB instruction to the active function. +// Operates on the global context. +func PSIGNB(mx, x operand.Op) { ctx.PSIGNB(mx, x) } + +// PSIGND: Packed Sign of Doubleword Integers. 
+// +// Forms: +// +// PSIGND xmm xmm +// PSIGND m128 xmm +// Construct and append a PSIGND instruction to the active function. +func (c *Context) PSIGND(mx, x operand.Op) { + if inst, err := x86.PSIGND(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSIGND: Packed Sign of Doubleword Integers. +// +// Forms: +// +// PSIGND xmm xmm +// PSIGND m128 xmm +// Construct and append a PSIGND instruction to the active function. +// Operates on the global context. +func PSIGND(mx, x operand.Op) { ctx.PSIGND(mx, x) } + +// PSIGNW: Packed Sign of Word Integers. +// +// Forms: +// +// PSIGNW xmm xmm +// PSIGNW m128 xmm +// Construct and append a PSIGNW instruction to the active function. +func (c *Context) PSIGNW(mx, x operand.Op) { + if inst, err := x86.PSIGNW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSIGNW: Packed Sign of Word Integers. +// +// Forms: +// +// PSIGNW xmm xmm +// PSIGNW m128 xmm +// Construct and append a PSIGNW instruction to the active function. +// Operates on the global context. +func PSIGNW(mx, x operand.Op) { ctx.PSIGNW(mx, x) } + +// PSLLDQ: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// PSLLDQ imm8 xmm +// Construct and append a PSLLDQ instruction to the active function. +func (c *Context) PSLLDQ(i, x operand.Op) { + if inst, err := x86.PSLLDQ(i, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSLLDQ: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// PSLLDQ imm8 xmm +// Construct and append a PSLLDQ instruction to the active function. +// Operates on the global context. +func PSLLDQ(i, x operand.Op) { ctx.PSLLDQ(i, x) } + +// PSLLL: Shift Packed Doubleword Data Left Logical. +// +// Forms: +// +// PSLLL imm8 xmm +// PSLLL xmm xmm +// PSLLL m128 xmm +// Construct and append a PSLLL instruction to the active function. +func (c *Context) PSLLL(imx, x operand.Op) { + if inst, err := x86.PSLLL(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSLLL: Shift Packed Doubleword Data Left Logical. +// +// Forms: +// +// PSLLL imm8 xmm +// PSLLL xmm xmm +// PSLLL m128 xmm +// Construct and append a PSLLL instruction to the active function. +// Operates on the global context. +func PSLLL(imx, x operand.Op) { ctx.PSLLL(imx, x) } + +// PSLLO: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// PSLLO imm8 xmm +// Construct and append a PSLLO instruction to the active function. +func (c *Context) PSLLO(i, x operand.Op) { + if inst, err := x86.PSLLO(i, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSLLO: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// PSLLO imm8 xmm +// Construct and append a PSLLO instruction to the active function. +// Operates on the global context. +func PSLLO(i, x operand.Op) { ctx.PSLLO(i, x) } + +// PSLLQ: Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// PSLLQ imm8 xmm +// PSLLQ xmm xmm +// PSLLQ m128 xmm +// Construct and append a PSLLQ instruction to the active function. +func (c *Context) PSLLQ(imx, x operand.Op) { + if inst, err := x86.PSLLQ(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSLLQ: Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// PSLLQ imm8 xmm +// PSLLQ xmm xmm +// PSLLQ m128 xmm +// Construct and append a PSLLQ instruction to the active function. +// Operates on the global context. 
+func PSLLQ(imx, x operand.Op) { ctx.PSLLQ(imx, x) } + +// PSLLW: Shift Packed Word Data Left Logical. +// +// Forms: +// +// PSLLW imm8 xmm +// PSLLW xmm xmm +// PSLLW m128 xmm +// Construct and append a PSLLW instruction to the active function. +func (c *Context) PSLLW(imx, x operand.Op) { + if inst, err := x86.PSLLW(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSLLW: Shift Packed Word Data Left Logical. +// +// Forms: +// +// PSLLW imm8 xmm +// PSLLW xmm xmm +// PSLLW m128 xmm +// Construct and append a PSLLW instruction to the active function. +// Operates on the global context. +func PSLLW(imx, x operand.Op) { ctx.PSLLW(imx, x) } + +// PSRAL: Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// PSRAL imm8 xmm +// PSRAL xmm xmm +// PSRAL m128 xmm +// Construct and append a PSRAL instruction to the active function. +func (c *Context) PSRAL(imx, x operand.Op) { + if inst, err := x86.PSRAL(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRAL: Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// PSRAL imm8 xmm +// PSRAL xmm xmm +// PSRAL m128 xmm +// Construct and append a PSRAL instruction to the active function. +// Operates on the global context. +func PSRAL(imx, x operand.Op) { ctx.PSRAL(imx, x) } + +// PSRAW: Shift Packed Word Data Right Arithmetic. +// +// Forms: +// +// PSRAW imm8 xmm +// PSRAW xmm xmm +// PSRAW m128 xmm +// Construct and append a PSRAW instruction to the active function. +func (c *Context) PSRAW(imx, x operand.Op) { + if inst, err := x86.PSRAW(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRAW: Shift Packed Word Data Right Arithmetic. +// +// Forms: +// +// PSRAW imm8 xmm +// PSRAW xmm xmm +// PSRAW m128 xmm +// Construct and append a PSRAW instruction to the active function. +// Operates on the global context. +func PSRAW(imx, x operand.Op) { ctx.PSRAW(imx, x) } + +// PSRLDQ: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// PSRLDQ imm8 xmm +// Construct and append a PSRLDQ instruction to the active function. +func (c *Context) PSRLDQ(i, x operand.Op) { + if inst, err := x86.PSRLDQ(i, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRLDQ: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// PSRLDQ imm8 xmm +// Construct and append a PSRLDQ instruction to the active function. +// Operates on the global context. +func PSRLDQ(i, x operand.Op) { ctx.PSRLDQ(i, x) } + +// PSRLL: Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// PSRLL imm8 xmm +// PSRLL xmm xmm +// PSRLL m128 xmm +// Construct and append a PSRLL instruction to the active function. +func (c *Context) PSRLL(imx, x operand.Op) { + if inst, err := x86.PSRLL(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRLL: Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// PSRLL imm8 xmm +// PSRLL xmm xmm +// PSRLL m128 xmm +// Construct and append a PSRLL instruction to the active function. +// Operates on the global context. +func PSRLL(imx, x operand.Op) { ctx.PSRLL(imx, x) } + +// PSRLO: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// PSRLO imm8 xmm +// Construct and append a PSRLO instruction to the active function. 
+func (c *Context) PSRLO(i, x operand.Op) { + if inst, err := x86.PSRLO(i, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRLO: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// PSRLO imm8 xmm +// Construct and append a PSRLO instruction to the active function. +// Operates on the global context. +func PSRLO(i, x operand.Op) { ctx.PSRLO(i, x) } + +// PSRLQ: Shift Packed Quadword Data Right Logical. +// +// Forms: +// +// PSRLQ imm8 xmm +// PSRLQ xmm xmm +// PSRLQ m128 xmm +// Construct and append a PSRLQ instruction to the active function. +func (c *Context) PSRLQ(imx, x operand.Op) { + if inst, err := x86.PSRLQ(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRLQ: Shift Packed Quadword Data Right Logical. +// +// Forms: +// +// PSRLQ imm8 xmm +// PSRLQ xmm xmm +// PSRLQ m128 xmm +// Construct and append a PSRLQ instruction to the active function. +// Operates on the global context. +func PSRLQ(imx, x operand.Op) { ctx.PSRLQ(imx, x) } + +// PSRLW: Shift Packed Word Data Right Logical. +// +// Forms: +// +// PSRLW imm8 xmm +// PSRLW xmm xmm +// PSRLW m128 xmm +// Construct and append a PSRLW instruction to the active function. +func (c *Context) PSRLW(imx, x operand.Op) { + if inst, err := x86.PSRLW(imx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSRLW: Shift Packed Word Data Right Logical. +// +// Forms: +// +// PSRLW imm8 xmm +// PSRLW xmm xmm +// PSRLW m128 xmm +// Construct and append a PSRLW instruction to the active function. +// Operates on the global context. +func PSRLW(imx, x operand.Op) { ctx.PSRLW(imx, x) } + +// PSUBB: Subtract Packed Byte Integers. +// +// Forms: +// +// PSUBB xmm xmm +// PSUBB m128 xmm +// Construct and append a PSUBB instruction to the active function. +func (c *Context) PSUBB(mx, x operand.Op) { + if inst, err := x86.PSUBB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBB: Subtract Packed Byte Integers. +// +// Forms: +// +// PSUBB xmm xmm +// PSUBB m128 xmm +// Construct and append a PSUBB instruction to the active function. +// Operates on the global context. +func PSUBB(mx, x operand.Op) { ctx.PSUBB(mx, x) } + +// PSUBL: Subtract Packed Doubleword Integers. +// +// Forms: +// +// PSUBL xmm xmm +// PSUBL m128 xmm +// Construct and append a PSUBL instruction to the active function. +func (c *Context) PSUBL(mx, x operand.Op) { + if inst, err := x86.PSUBL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBL: Subtract Packed Doubleword Integers. +// +// Forms: +// +// PSUBL xmm xmm +// PSUBL m128 xmm +// Construct and append a PSUBL instruction to the active function. +// Operates on the global context. +func PSUBL(mx, x operand.Op) { ctx.PSUBL(mx, x) } + +// PSUBQ: Subtract Packed Quadword Integers. +// +// Forms: +// +// PSUBQ xmm xmm +// PSUBQ m128 xmm +// Construct and append a PSUBQ instruction to the active function. +func (c *Context) PSUBQ(mx, x operand.Op) { + if inst, err := x86.PSUBQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBQ: Subtract Packed Quadword Integers. +// +// Forms: +// +// PSUBQ xmm xmm +// PSUBQ m128 xmm +// Construct and append a PSUBQ instruction to the active function. +// Operates on the global context. +func PSUBQ(mx, x operand.Op) { ctx.PSUBQ(mx, x) } + +// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation. 
+// +// Forms: +// +// PSUBSB xmm xmm +// PSUBSB m128 xmm +// Construct and append a PSUBSB instruction to the active function. +func (c *Context) PSUBSB(mx, x operand.Op) { + if inst, err := x86.PSUBSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// PSUBSB xmm xmm +// PSUBSB m128 xmm +// Construct and append a PSUBSB instruction to the active function. +// Operates on the global context. +func PSUBSB(mx, x operand.Op) { ctx.PSUBSB(mx, x) } + +// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PSUBSW xmm xmm +// PSUBSW m128 xmm +// Construct and append a PSUBSW instruction to the active function. +func (c *Context) PSUBSW(mx, x operand.Op) { + if inst, err := x86.PSUBSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PSUBSW xmm xmm +// PSUBSW m128 xmm +// Construct and append a PSUBSW instruction to the active function. +// Operates on the global context. +func PSUBSW(mx, x operand.Op) { ctx.PSUBSW(mx, x) } + +// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// PSUBUSB xmm xmm +// PSUBUSB m128 xmm +// Construct and append a PSUBUSB instruction to the active function. +func (c *Context) PSUBUSB(mx, x operand.Op) { + if inst, err := x86.PSUBUSB(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// PSUBUSB xmm xmm +// PSUBUSB m128 xmm +// Construct and append a PSUBUSB instruction to the active function. +// Operates on the global context. +func PSUBUSB(mx, x operand.Op) { ctx.PSUBUSB(mx, x) } + +// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// PSUBUSW xmm xmm +// PSUBUSW m128 xmm +// Construct and append a PSUBUSW instruction to the active function. +func (c *Context) PSUBUSW(mx, x operand.Op) { + if inst, err := x86.PSUBUSW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// PSUBUSW xmm xmm +// PSUBUSW m128 xmm +// Construct and append a PSUBUSW instruction to the active function. +// Operates on the global context. +func PSUBUSW(mx, x operand.Op) { ctx.PSUBUSW(mx, x) } + +// PSUBW: Subtract Packed Word Integers. +// +// Forms: +// +// PSUBW xmm xmm +// PSUBW m128 xmm +// Construct and append a PSUBW instruction to the active function. +func (c *Context) PSUBW(mx, x operand.Op) { + if inst, err := x86.PSUBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PSUBW: Subtract Packed Word Integers. +// +// Forms: +// +// PSUBW xmm xmm +// PSUBW m128 xmm +// Construct and append a PSUBW instruction to the active function. +// Operates on the global context. +func PSUBW(mx, x operand.Op) { ctx.PSUBW(mx, x) } + +// PTEST: Packed Logical Compare. +// +// Forms: +// +// PTEST xmm xmm +// PTEST m128 xmm +// Construct and append a PTEST instruction to the active function. +func (c *Context) PTEST(mx, x operand.Op) { + if inst, err := x86.PTEST(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PTEST: Packed Logical Compare. 
+// +// Forms: +// +// PTEST xmm xmm +// PTEST m128 xmm +// Construct and append a PTEST instruction to the active function. +// Operates on the global context. +func PTEST(mx, x operand.Op) { ctx.PTEST(mx, x) } + +// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words. +// +// Forms: +// +// PUNPCKHBW xmm xmm +// PUNPCKHBW m128 xmm +// Construct and append a PUNPCKHBW instruction to the active function. +func (c *Context) PUNPCKHBW(mx, x operand.Op) { + if inst, err := x86.PUNPCKHBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words. +// +// Forms: +// +// PUNPCKHBW xmm xmm +// PUNPCKHBW m128 xmm +// Construct and append a PUNPCKHBW instruction to the active function. +// Operates on the global context. +func PUNPCKHBW(mx, x operand.Op) { ctx.PUNPCKHBW(mx, x) } + +// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords. +// +// Forms: +// +// PUNPCKHLQ xmm xmm +// PUNPCKHLQ m128 xmm +// Construct and append a PUNPCKHLQ instruction to the active function. +func (c *Context) PUNPCKHLQ(mx, x operand.Op) { + if inst, err := x86.PUNPCKHLQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords. +// +// Forms: +// +// PUNPCKHLQ xmm xmm +// PUNPCKHLQ m128 xmm +// Construct and append a PUNPCKHLQ instruction to the active function. +// Operates on the global context. +func PUNPCKHLQ(mx, x operand.Op) { ctx.PUNPCKHLQ(mx, x) } + +// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords. +// +// Forms: +// +// PUNPCKHQDQ xmm xmm +// PUNPCKHQDQ m128 xmm +// Construct and append a PUNPCKHQDQ instruction to the active function. +func (c *Context) PUNPCKHQDQ(mx, x operand.Op) { + if inst, err := x86.PUNPCKHQDQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords. +// +// Forms: +// +// PUNPCKHQDQ xmm xmm +// PUNPCKHQDQ m128 xmm +// Construct and append a PUNPCKHQDQ instruction to the active function. +// Operates on the global context. +func PUNPCKHQDQ(mx, x operand.Op) { ctx.PUNPCKHQDQ(mx, x) } + +// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords. +// +// Forms: +// +// PUNPCKHWL xmm xmm +// PUNPCKHWL m128 xmm +// Construct and append a PUNPCKHWL instruction to the active function. +func (c *Context) PUNPCKHWL(mx, x operand.Op) { + if inst, err := x86.PUNPCKHWL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords. +// +// Forms: +// +// PUNPCKHWL xmm xmm +// PUNPCKHWL m128 xmm +// Construct and append a PUNPCKHWL instruction to the active function. +// Operates on the global context. +func PUNPCKHWL(mx, x operand.Op) { ctx.PUNPCKHWL(mx, x) } + +// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words. +// +// Forms: +// +// PUNPCKLBW xmm xmm +// PUNPCKLBW m128 xmm +// Construct and append a PUNPCKLBW instruction to the active function. +func (c *Context) PUNPCKLBW(mx, x operand.Op) { + if inst, err := x86.PUNPCKLBW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words. +// +// Forms: +// +// PUNPCKLBW xmm xmm +// PUNPCKLBW m128 xmm +// Construct and append a PUNPCKLBW instruction to the active function. +// Operates on the global context. 
+func PUNPCKLBW(mx, x operand.Op) { ctx.PUNPCKLBW(mx, x) } + +// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords. +// +// Forms: +// +// PUNPCKLLQ xmm xmm +// PUNPCKLLQ m128 xmm +// Construct and append a PUNPCKLLQ instruction to the active function. +func (c *Context) PUNPCKLLQ(mx, x operand.Op) { + if inst, err := x86.PUNPCKLLQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords. +// +// Forms: +// +// PUNPCKLLQ xmm xmm +// PUNPCKLLQ m128 xmm +// Construct and append a PUNPCKLLQ instruction to the active function. +// Operates on the global context. +func PUNPCKLLQ(mx, x operand.Op) { ctx.PUNPCKLLQ(mx, x) } + +// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords. +// +// Forms: +// +// PUNPCKLQDQ xmm xmm +// PUNPCKLQDQ m128 xmm +// Construct and append a PUNPCKLQDQ instruction to the active function. +func (c *Context) PUNPCKLQDQ(mx, x operand.Op) { + if inst, err := x86.PUNPCKLQDQ(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords. +// +// Forms: +// +// PUNPCKLQDQ xmm xmm +// PUNPCKLQDQ m128 xmm +// Construct and append a PUNPCKLQDQ instruction to the active function. +// Operates on the global context. +func PUNPCKLQDQ(mx, x operand.Op) { ctx.PUNPCKLQDQ(mx, x) } + +// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords. +// +// Forms: +// +// PUNPCKLWL xmm xmm +// PUNPCKLWL m128 xmm +// Construct and append a PUNPCKLWL instruction to the active function. +func (c *Context) PUNPCKLWL(mx, x operand.Op) { + if inst, err := x86.PUNPCKLWL(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords. +// +// Forms: +// +// PUNPCKLWL xmm xmm +// PUNPCKLWL m128 xmm +// Construct and append a PUNPCKLWL instruction to the active function. +// Operates on the global context. +func PUNPCKLWL(mx, x operand.Op) { ctx.PUNPCKLWL(mx, x) } + +// PUSHQ: Push Value Onto the Stack. +// +// Forms: +// +// PUSHQ imm8 +// PUSHQ imm32 +// PUSHQ r64 +// PUSHQ m64 +// Construct and append a PUSHQ instruction to the active function. +func (c *Context) PUSHQ(imr operand.Op) { + if inst, err := x86.PUSHQ(imr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUSHQ: Push Value Onto the Stack. +// +// Forms: +// +// PUSHQ imm8 +// PUSHQ imm32 +// PUSHQ r64 +// PUSHQ m64 +// Construct and append a PUSHQ instruction to the active function. +// Operates on the global context. +func PUSHQ(imr operand.Op) { ctx.PUSHQ(imr) } + +// PUSHW: Push Value Onto the Stack. +// +// Forms: +// +// PUSHW r16 +// PUSHW m16 +// Construct and append a PUSHW instruction to the active function. +func (c *Context) PUSHW(mr operand.Op) { + if inst, err := x86.PUSHW(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PUSHW: Push Value Onto the Stack. +// +// Forms: +// +// PUSHW r16 +// PUSHW m16 +// Construct and append a PUSHW instruction to the active function. +// Operates on the global context. +func PUSHW(mr operand.Op) { ctx.PUSHW(mr) } + +// PXOR: Packed Bitwise Logical Exclusive OR. +// +// Forms: +// +// PXOR xmm xmm +// PXOR m128 xmm +// Construct and append a PXOR instruction to the active function. 
+func (c *Context) PXOR(mx, x operand.Op) { + if inst, err := x86.PXOR(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// PXOR: Packed Bitwise Logical Exclusive OR. +// +// Forms: +// +// PXOR xmm xmm +// PXOR m128 xmm +// Construct and append a PXOR instruction to the active function. +// Operates on the global context. +func PXOR(mx, x operand.Op) { ctx.PXOR(mx, x) } + +// RCLB: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLB 1 r8 +// RCLB imm8 r8 +// RCLB cl r8 +// RCLB 1 m8 +// RCLB imm8 m8 +// RCLB cl m8 +// Construct and append a RCLB instruction to the active function. +func (c *Context) RCLB(ci, mr operand.Op) { + if inst, err := x86.RCLB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCLB: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLB 1 r8 +// RCLB imm8 r8 +// RCLB cl r8 +// RCLB 1 m8 +// RCLB imm8 m8 +// RCLB cl m8 +// Construct and append a RCLB instruction to the active function. +// Operates on the global context. +func RCLB(ci, mr operand.Op) { ctx.RCLB(ci, mr) } + +// RCLL: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLL 1 r32 +// RCLL imm8 r32 +// RCLL cl r32 +// RCLL 1 m32 +// RCLL imm8 m32 +// RCLL cl m32 +// Construct and append a RCLL instruction to the active function. +func (c *Context) RCLL(ci, mr operand.Op) { + if inst, err := x86.RCLL(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCLL: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLL 1 r32 +// RCLL imm8 r32 +// RCLL cl r32 +// RCLL 1 m32 +// RCLL imm8 m32 +// RCLL cl m32 +// Construct and append a RCLL instruction to the active function. +// Operates on the global context. +func RCLL(ci, mr operand.Op) { ctx.RCLL(ci, mr) } + +// RCLQ: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLQ 1 r64 +// RCLQ imm8 r64 +// RCLQ cl r64 +// RCLQ 1 m64 +// RCLQ imm8 m64 +// RCLQ cl m64 +// Construct and append a RCLQ instruction to the active function. +func (c *Context) RCLQ(ci, mr operand.Op) { + if inst, err := x86.RCLQ(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCLQ: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLQ 1 r64 +// RCLQ imm8 r64 +// RCLQ cl r64 +// RCLQ 1 m64 +// RCLQ imm8 m64 +// RCLQ cl m64 +// Construct and append a RCLQ instruction to the active function. +// Operates on the global context. +func RCLQ(ci, mr operand.Op) { ctx.RCLQ(ci, mr) } + +// RCLW: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLW 1 r16 +// RCLW imm8 r16 +// RCLW cl r16 +// RCLW 1 m16 +// RCLW imm8 m16 +// RCLW cl m16 +// Construct and append a RCLW instruction to the active function. +func (c *Context) RCLW(ci, mr operand.Op) { + if inst, err := x86.RCLW(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCLW: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLW 1 r16 +// RCLW imm8 r16 +// RCLW cl r16 +// RCLW 1 m16 +// RCLW imm8 m16 +// RCLW cl m16 +// Construct and append a RCLW instruction to the active function. +// Operates on the global context. +func RCLW(ci, mr operand.Op) { ctx.RCLW(ci, mr) } + +// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// RCPPS xmm xmm +// RCPPS m128 xmm +// Construct and append a RCPPS instruction to the active function. 
+func (c *Context) RCPPS(mx, x operand.Op) { + if inst, err := x86.RCPPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// RCPPS xmm xmm +// RCPPS m128 xmm +// Construct and append a RCPPS instruction to the active function. +// Operates on the global context. +func RCPPS(mx, x operand.Op) { ctx.RCPPS(mx, x) } + +// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// RCPSS xmm xmm +// RCPSS m32 xmm +// Construct and append a RCPSS instruction to the active function. +func (c *Context) RCPSS(mx, x operand.Op) { + if inst, err := x86.RCPSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// RCPSS xmm xmm +// RCPSS m32 xmm +// Construct and append a RCPSS instruction to the active function. +// Operates on the global context. +func RCPSS(mx, x operand.Op) { ctx.RCPSS(mx, x) } + +// RCRB: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRB 1 r8 +// RCRB imm8 r8 +// RCRB cl r8 +// RCRB 1 m8 +// RCRB imm8 m8 +// RCRB cl m8 +// Construct and append a RCRB instruction to the active function. +func (c *Context) RCRB(ci, mr operand.Op) { + if inst, err := x86.RCRB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCRB: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRB 1 r8 +// RCRB imm8 r8 +// RCRB cl r8 +// RCRB 1 m8 +// RCRB imm8 m8 +// RCRB cl m8 +// Construct and append a RCRB instruction to the active function. +// Operates on the global context. +func RCRB(ci, mr operand.Op) { ctx.RCRB(ci, mr) } + +// RCRL: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRL 1 r32 +// RCRL imm8 r32 +// RCRL cl r32 +// RCRL 1 m32 +// RCRL imm8 m32 +// RCRL cl m32 +// Construct and append a RCRL instruction to the active function. +func (c *Context) RCRL(ci, mr operand.Op) { + if inst, err := x86.RCRL(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCRL: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRL 1 r32 +// RCRL imm8 r32 +// RCRL cl r32 +// RCRL 1 m32 +// RCRL imm8 m32 +// RCRL cl m32 +// Construct and append a RCRL instruction to the active function. +// Operates on the global context. +func RCRL(ci, mr operand.Op) { ctx.RCRL(ci, mr) } + +// RCRQ: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRQ 1 r64 +// RCRQ imm8 r64 +// RCRQ cl r64 +// RCRQ 1 m64 +// RCRQ imm8 m64 +// RCRQ cl m64 +// Construct and append a RCRQ instruction to the active function. +func (c *Context) RCRQ(ci, mr operand.Op) { + if inst, err := x86.RCRQ(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCRQ: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRQ 1 r64 +// RCRQ imm8 r64 +// RCRQ cl r64 +// RCRQ 1 m64 +// RCRQ imm8 m64 +// RCRQ cl m64 +// Construct and append a RCRQ instruction to the active function. +// Operates on the global context. +func RCRQ(ci, mr operand.Op) { ctx.RCRQ(ci, mr) } + +// RCRW: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRW 1 r16 +// RCRW imm8 r16 +// RCRW cl r16 +// RCRW 1 m16 +// RCRW imm8 m16 +// RCRW cl m16 +// Construct and append a RCRW instruction to the active function. 
+func (c *Context) RCRW(ci, mr operand.Op) { + if inst, err := x86.RCRW(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RCRW: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRW 1 r16 +// RCRW imm8 r16 +// RCRW cl r16 +// RCRW 1 m16 +// RCRW imm8 m16 +// RCRW cl m16 +// Construct and append a RCRW instruction to the active function. +// Operates on the global context. +func RCRW(ci, mr operand.Op) { ctx.RCRW(ci, mr) } + +// RDRANDL: Read Random Number. +// +// Forms: +// +// RDRANDL r32 +// Construct and append a RDRANDL instruction to the active function. +func (c *Context) RDRANDL(r operand.Op) { + if inst, err := x86.RDRANDL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDRANDL: Read Random Number. +// +// Forms: +// +// RDRANDL r32 +// Construct and append a RDRANDL instruction to the active function. +// Operates on the global context. +func RDRANDL(r operand.Op) { ctx.RDRANDL(r) } + +// RDRANDQ: Read Random Number. +// +// Forms: +// +// RDRANDQ r64 +// Construct and append a RDRANDQ instruction to the active function. +func (c *Context) RDRANDQ(r operand.Op) { + if inst, err := x86.RDRANDQ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDRANDQ: Read Random Number. +// +// Forms: +// +// RDRANDQ r64 +// Construct and append a RDRANDQ instruction to the active function. +// Operates on the global context. +func RDRANDQ(r operand.Op) { ctx.RDRANDQ(r) } + +// RDRANDW: Read Random Number. +// +// Forms: +// +// RDRANDW r16 +// Construct and append a RDRANDW instruction to the active function. +func (c *Context) RDRANDW(r operand.Op) { + if inst, err := x86.RDRANDW(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDRANDW: Read Random Number. +// +// Forms: +// +// RDRANDW r16 +// Construct and append a RDRANDW instruction to the active function. +// Operates on the global context. +func RDRANDW(r operand.Op) { ctx.RDRANDW(r) } + +// RDSEEDL: Read Random SEED. +// +// Forms: +// +// RDSEEDL r32 +// Construct and append a RDSEEDL instruction to the active function. +func (c *Context) RDSEEDL(r operand.Op) { + if inst, err := x86.RDSEEDL(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDSEEDL: Read Random SEED. +// +// Forms: +// +// RDSEEDL r32 +// Construct and append a RDSEEDL instruction to the active function. +// Operates on the global context. +func RDSEEDL(r operand.Op) { ctx.RDSEEDL(r) } + +// RDSEEDQ: Read Random SEED. +// +// Forms: +// +// RDSEEDQ r64 +// Construct and append a RDSEEDQ instruction to the active function. +func (c *Context) RDSEEDQ(r operand.Op) { + if inst, err := x86.RDSEEDQ(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDSEEDQ: Read Random SEED. +// +// Forms: +// +// RDSEEDQ r64 +// Construct and append a RDSEEDQ instruction to the active function. +// Operates on the global context. +func RDSEEDQ(r operand.Op) { ctx.RDSEEDQ(r) } + +// RDSEEDW: Read Random SEED. +// +// Forms: +// +// RDSEEDW r16 +// Construct and append a RDSEEDW instruction to the active function. +func (c *Context) RDSEEDW(r operand.Op) { + if inst, err := x86.RDSEEDW(r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDSEEDW: Read Random SEED. +// +// Forms: +// +// RDSEEDW r16 +// Construct and append a RDSEEDW instruction to the active function. +// Operates on the global context. 
+func RDSEEDW(r operand.Op) { ctx.RDSEEDW(r) } + +// RDTSC: Read Time-Stamp Counter. +// +// Forms: +// +// RDTSC +// Construct and append a RDTSC instruction to the active function. +func (c *Context) RDTSC() { + if inst, err := x86.RDTSC(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDTSC: Read Time-Stamp Counter. +// +// Forms: +// +// RDTSC +// Construct and append a RDTSC instruction to the active function. +// Operates on the global context. +func RDTSC() { ctx.RDTSC() } + +// RDTSCP: Read Time-Stamp Counter and Processor ID. +// +// Forms: +// +// RDTSCP +// Construct and append a RDTSCP instruction to the active function. +func (c *Context) RDTSCP() { + if inst, err := x86.RDTSCP(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RDTSCP: Read Time-Stamp Counter and Processor ID. +// +// Forms: +// +// RDTSCP +// Construct and append a RDTSCP instruction to the active function. +// Operates on the global context. +func RDTSCP() { ctx.RDTSCP() } + +// RET: Return from Procedure. +// +// Forms: +// +// RET +// Construct and append a RET instruction to the active function. +func (c *Context) RET() { + if inst, err := x86.RET(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RET: Return from Procedure. +// +// Forms: +// +// RET +// Construct and append a RET instruction to the active function. +// Operates on the global context. +func RET() { ctx.RET() } + +// RETFL: Return from Procedure. +// +// Forms: +// +// RETFL imm16 +// Construct and append a RETFL instruction to the active function. +func (c *Context) RETFL(i operand.Op) { + if inst, err := x86.RETFL(i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RETFL: Return from Procedure. +// +// Forms: +// +// RETFL imm16 +// Construct and append a RETFL instruction to the active function. +// Operates on the global context. +func RETFL(i operand.Op) { ctx.RETFL(i) } + +// RETFQ: Return from Procedure. +// +// Forms: +// +// RETFQ imm16 +// Construct and append a RETFQ instruction to the active function. +func (c *Context) RETFQ(i operand.Op) { + if inst, err := x86.RETFQ(i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RETFQ: Return from Procedure. +// +// Forms: +// +// RETFQ imm16 +// Construct and append a RETFQ instruction to the active function. +// Operates on the global context. +func RETFQ(i operand.Op) { ctx.RETFQ(i) } + +// RETFW: Return from Procedure. +// +// Forms: +// +// RETFW imm16 +// Construct and append a RETFW instruction to the active function. +func (c *Context) RETFW(i operand.Op) { + if inst, err := x86.RETFW(i); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RETFW: Return from Procedure. +// +// Forms: +// +// RETFW imm16 +// Construct and append a RETFW instruction to the active function. +// Operates on the global context. +func RETFW(i operand.Op) { ctx.RETFW(i) } + +// ROLB: Rotate Left. +// +// Forms: +// +// ROLB 1 r8 +// ROLB imm8 r8 +// ROLB cl r8 +// ROLB 1 m8 +// ROLB imm8 m8 +// ROLB cl m8 +// Construct and append a ROLB instruction to the active function. +func (c *Context) ROLB(ci, mr operand.Op) { + if inst, err := x86.ROLB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROLB: Rotate Left. +// +// Forms: +// +// ROLB 1 r8 +// ROLB imm8 r8 +// ROLB cl r8 +// ROLB 1 m8 +// ROLB imm8 m8 +// ROLB cl m8 +// Construct and append a ROLB instruction to the active function. 
+// Operates on the global context. +func ROLB(ci, mr operand.Op) { ctx.ROLB(ci, mr) } + +// ROLL: Rotate Left. +// +// Forms: +// +// ROLL 1 r32 +// ROLL imm8 r32 +// ROLL cl r32 +// ROLL 1 m32 +// ROLL imm8 m32 +// ROLL cl m32 +// Construct and append a ROLL instruction to the active function. +func (c *Context) ROLL(ci, mr operand.Op) { + if inst, err := x86.ROLL(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROLL: Rotate Left. +// +// Forms: +// +// ROLL 1 r32 +// ROLL imm8 r32 +// ROLL cl r32 +// ROLL 1 m32 +// ROLL imm8 m32 +// ROLL cl m32 +// Construct and append a ROLL instruction to the active function. +// Operates on the global context. +func ROLL(ci, mr operand.Op) { ctx.ROLL(ci, mr) } + +// ROLQ: Rotate Left. +// +// Forms: +// +// ROLQ 1 r64 +// ROLQ imm8 r64 +// ROLQ cl r64 +// ROLQ 1 m64 +// ROLQ imm8 m64 +// ROLQ cl m64 +// Construct and append a ROLQ instruction to the active function. +func (c *Context) ROLQ(ci, mr operand.Op) { + if inst, err := x86.ROLQ(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROLQ: Rotate Left. +// +// Forms: +// +// ROLQ 1 r64 +// ROLQ imm8 r64 +// ROLQ cl r64 +// ROLQ 1 m64 +// ROLQ imm8 m64 +// ROLQ cl m64 +// Construct and append a ROLQ instruction to the active function. +// Operates on the global context. +func ROLQ(ci, mr operand.Op) { ctx.ROLQ(ci, mr) } + +// ROLW: Rotate Left. +// +// Forms: +// +// ROLW 1 r16 +// ROLW imm8 r16 +// ROLW cl r16 +// ROLW 1 m16 +// ROLW imm8 m16 +// ROLW cl m16 +// Construct and append a ROLW instruction to the active function. +func (c *Context) ROLW(ci, mr operand.Op) { + if inst, err := x86.ROLW(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROLW: Rotate Left. +// +// Forms: +// +// ROLW 1 r16 +// ROLW imm8 r16 +// ROLW cl r16 +// ROLW 1 m16 +// ROLW imm8 m16 +// ROLW cl m16 +// Construct and append a ROLW instruction to the active function. +// Operates on the global context. +func ROLW(ci, mr operand.Op) { ctx.ROLW(ci, mr) } + +// RORB: Rotate Right. +// +// Forms: +// +// RORB 1 r8 +// RORB imm8 r8 +// RORB cl r8 +// RORB 1 m8 +// RORB imm8 m8 +// RORB cl m8 +// Construct and append a RORB instruction to the active function. +func (c *Context) RORB(ci, mr operand.Op) { + if inst, err := x86.RORB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RORB: Rotate Right. +// +// Forms: +// +// RORB 1 r8 +// RORB imm8 r8 +// RORB cl r8 +// RORB 1 m8 +// RORB imm8 m8 +// RORB cl m8 +// Construct and append a RORB instruction to the active function. +// Operates on the global context. +func RORB(ci, mr operand.Op) { ctx.RORB(ci, mr) } + +// RORL: Rotate Right. +// +// Forms: +// +// RORL 1 r32 +// RORL imm8 r32 +// RORL cl r32 +// RORL 1 m32 +// RORL imm8 m32 +// RORL cl m32 +// Construct and append a RORL instruction to the active function. +func (c *Context) RORL(ci, mr operand.Op) { + if inst, err := x86.RORL(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RORL: Rotate Right. +// +// Forms: +// +// RORL 1 r32 +// RORL imm8 r32 +// RORL cl r32 +// RORL 1 m32 +// RORL imm8 m32 +// RORL cl m32 +// Construct and append a RORL instruction to the active function. +// Operates on the global context. +func RORL(ci, mr operand.Op) { ctx.RORL(ci, mr) } + +// RORQ: Rotate Right. 
+// +// Forms: +// +// RORQ 1 r64 +// RORQ imm8 r64 +// RORQ cl r64 +// RORQ 1 m64 +// RORQ imm8 m64 +// RORQ cl m64 +// Construct and append a RORQ instruction to the active function. +func (c *Context) RORQ(ci, mr operand.Op) { + if inst, err := x86.RORQ(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RORQ: Rotate Right. +// +// Forms: +// +// RORQ 1 r64 +// RORQ imm8 r64 +// RORQ cl r64 +// RORQ 1 m64 +// RORQ imm8 m64 +// RORQ cl m64 +// Construct and append a RORQ instruction to the active function. +// Operates on the global context. +func RORQ(ci, mr operand.Op) { ctx.RORQ(ci, mr) } + +// RORW: Rotate Right. +// +// Forms: +// +// RORW 1 r16 +// RORW imm8 r16 +// RORW cl r16 +// RORW 1 m16 +// RORW imm8 m16 +// RORW cl m16 +// Construct and append a RORW instruction to the active function. +func (c *Context) RORW(ci, mr operand.Op) { + if inst, err := x86.RORW(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RORW: Rotate Right. +// +// Forms: +// +// RORW 1 r16 +// RORW imm8 r16 +// RORW cl r16 +// RORW 1 m16 +// RORW imm8 m16 +// RORW cl m16 +// Construct and append a RORW instruction to the active function. +// Operates on the global context. +func RORW(ci, mr operand.Op) { ctx.RORW(ci, mr) } + +// RORXL: Rotate Right Logical Without Affecting Flags. +// +// Forms: +// +// RORXL imm8 r32 r32 +// RORXL imm8 m32 r32 +// Construct and append a RORXL instruction to the active function. +func (c *Context) RORXL(i, mr, r operand.Op) { + if inst, err := x86.RORXL(i, mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RORXL: Rotate Right Logical Without Affecting Flags. +// +// Forms: +// +// RORXL imm8 r32 r32 +// RORXL imm8 m32 r32 +// Construct and append a RORXL instruction to the active function. +// Operates on the global context. +func RORXL(i, mr, r operand.Op) { ctx.RORXL(i, mr, r) } + +// RORXQ: Rotate Right Logical Without Affecting Flags. +// +// Forms: +// +// RORXQ imm8 r64 r64 +// RORXQ imm8 m64 r64 +// Construct and append a RORXQ instruction to the active function. +func (c *Context) RORXQ(i, mr, r operand.Op) { + if inst, err := x86.RORXQ(i, mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RORXQ: Rotate Right Logical Without Affecting Flags. +// +// Forms: +// +// RORXQ imm8 r64 r64 +// RORXQ imm8 m64 r64 +// Construct and append a RORXQ instruction to the active function. +// Operates on the global context. +func RORXQ(i, mr, r operand.Op) { ctx.RORXQ(i, mr, r) } + +// ROUNDPD: Round Packed Double Precision Floating-Point Values. +// +// Forms: +// +// ROUNDPD imm8 xmm xmm +// ROUNDPD imm8 m128 xmm +// Construct and append a ROUNDPD instruction to the active function. +func (c *Context) ROUNDPD(i, mx, x operand.Op) { + if inst, err := x86.ROUNDPD(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROUNDPD: Round Packed Double Precision Floating-Point Values. +// +// Forms: +// +// ROUNDPD imm8 xmm xmm +// ROUNDPD imm8 m128 xmm +// Construct and append a ROUNDPD instruction to the active function. +// Operates on the global context. +func ROUNDPD(i, mx, x operand.Op) { ctx.ROUNDPD(i, mx, x) } + +// ROUNDPS: Round Packed Single Precision Floating-Point Values. +// +// Forms: +// +// ROUNDPS imm8 xmm xmm +// ROUNDPS imm8 m128 xmm +// Construct and append a ROUNDPS instruction to the active function. 
+func (c *Context) ROUNDPS(i, mx, x operand.Op) { + if inst, err := x86.ROUNDPS(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROUNDPS: Round Packed Single Precision Floating-Point Values. +// +// Forms: +// +// ROUNDPS imm8 xmm xmm +// ROUNDPS imm8 m128 xmm +// Construct and append a ROUNDPS instruction to the active function. +// Operates on the global context. +func ROUNDPS(i, mx, x operand.Op) { ctx.ROUNDPS(i, mx, x) } + +// ROUNDSD: Round Scalar Double Precision Floating-Point Values. +// +// Forms: +// +// ROUNDSD imm8 xmm xmm +// ROUNDSD imm8 m64 xmm +// Construct and append a ROUNDSD instruction to the active function. +func (c *Context) ROUNDSD(i, mx, x operand.Op) { + if inst, err := x86.ROUNDSD(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROUNDSD: Round Scalar Double Precision Floating-Point Values. +// +// Forms: +// +// ROUNDSD imm8 xmm xmm +// ROUNDSD imm8 m64 xmm +// Construct and append a ROUNDSD instruction to the active function. +// Operates on the global context. +func ROUNDSD(i, mx, x operand.Op) { ctx.ROUNDSD(i, mx, x) } + +// ROUNDSS: Round Scalar Single Precision Floating-Point Values. +// +// Forms: +// +// ROUNDSS imm8 xmm xmm +// ROUNDSS imm8 m32 xmm +// Construct and append a ROUNDSS instruction to the active function. +func (c *Context) ROUNDSS(i, mx, x operand.Op) { + if inst, err := x86.ROUNDSS(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// ROUNDSS: Round Scalar Single Precision Floating-Point Values. +// +// Forms: +// +// ROUNDSS imm8 xmm xmm +// ROUNDSS imm8 m32 xmm +// Construct and append a ROUNDSS instruction to the active function. +// Operates on the global context. +func ROUNDSS(i, mx, x operand.Op) { ctx.ROUNDSS(i, mx, x) } + +// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// RSQRTPS xmm xmm +// RSQRTPS m128 xmm +// Construct and append a RSQRTPS instruction to the active function. +func (c *Context) RSQRTPS(mx, x operand.Op) { + if inst, err := x86.RSQRTPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// RSQRTPS xmm xmm +// RSQRTPS m128 xmm +// Construct and append a RSQRTPS instruction to the active function. +// Operates on the global context. +func RSQRTPS(mx, x operand.Op) { ctx.RSQRTPS(mx, x) } + +// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// RSQRTSS xmm xmm +// RSQRTSS m32 xmm +// Construct and append a RSQRTSS instruction to the active function. +func (c *Context) RSQRTSS(mx, x operand.Op) { + if inst, err := x86.RSQRTSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// RSQRTSS xmm xmm +// RSQRTSS m32 xmm +// Construct and append a RSQRTSS instruction to the active function. +// Operates on the global context. +func RSQRTSS(mx, x operand.Op) { ctx.RSQRTSS(mx, x) } + +// SALB: Arithmetic Shift Left. +// +// Forms: +// +// SALB 1 r8 +// SALB imm8 r8 +// SALB cl r8 +// SALB 1 m8 +// SALB imm8 m8 +// SALB cl m8 +// Construct and append a SALB instruction to the active function. 
+func (c *Context) SALB(ci, mr operand.Op) { + if inst, err := x86.SALB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SALB: Arithmetic Shift Left. +// +// Forms: +// +// SALB 1 r8 +// SALB imm8 r8 +// SALB cl r8 +// SALB 1 m8 +// SALB imm8 m8 +// SALB cl m8 +// Construct and append a SALB instruction to the active function. +// Operates on the global context. +func SALB(ci, mr operand.Op) { ctx.SALB(ci, mr) } + +// SALL: Arithmetic Shift Left. +// +// Forms: +// +// SALL 1 r32 +// SALL imm8 r32 +// SALL cl r32 +// SALL 1 m32 +// SALL imm8 m32 +// SALL cl m32 +// Construct and append a SALL instruction to the active function. +func (c *Context) SALL(ci, mr operand.Op) { + if inst, err := x86.SALL(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SALL: Arithmetic Shift Left. +// +// Forms: +// +// SALL 1 r32 +// SALL imm8 r32 +// SALL cl r32 +// SALL 1 m32 +// SALL imm8 m32 +// SALL cl m32 +// Construct and append a SALL instruction to the active function. +// Operates on the global context. +func SALL(ci, mr operand.Op) { ctx.SALL(ci, mr) } + +// SALQ: Arithmetic Shift Left. +// +// Forms: +// +// SALQ 1 r64 +// SALQ imm8 r64 +// SALQ cl r64 +// SALQ 1 m64 +// SALQ imm8 m64 +// SALQ cl m64 +// Construct and append a SALQ instruction to the active function. +func (c *Context) SALQ(ci, mr operand.Op) { + if inst, err := x86.SALQ(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SALQ: Arithmetic Shift Left. +// +// Forms: +// +// SALQ 1 r64 +// SALQ imm8 r64 +// SALQ cl r64 +// SALQ 1 m64 +// SALQ imm8 m64 +// SALQ cl m64 +// Construct and append a SALQ instruction to the active function. +// Operates on the global context. +func SALQ(ci, mr operand.Op) { ctx.SALQ(ci, mr) } + +// SALW: Arithmetic Shift Left. +// +// Forms: +// +// SALW 1 r16 +// SALW imm8 r16 +// SALW cl r16 +// SALW 1 m16 +// SALW imm8 m16 +// SALW cl m16 +// Construct and append a SALW instruction to the active function. +func (c *Context) SALW(ci, mr operand.Op) { + if inst, err := x86.SALW(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SALW: Arithmetic Shift Left. +// +// Forms: +// +// SALW 1 r16 +// SALW imm8 r16 +// SALW cl r16 +// SALW 1 m16 +// SALW imm8 m16 +// SALW cl m16 +// Construct and append a SALW instruction to the active function. +// Operates on the global context. +func SALW(ci, mr operand.Op) { ctx.SALW(ci, mr) } + +// SARB: Arithmetic Shift Right. +// +// Forms: +// +// SARB 1 r8 +// SARB imm8 r8 +// SARB cl r8 +// SARB 1 m8 +// SARB imm8 m8 +// SARB cl m8 +// Construct and append a SARB instruction to the active function. +func (c *Context) SARB(ci, mr operand.Op) { + if inst, err := x86.SARB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SARB: Arithmetic Shift Right. +// +// Forms: +// +// SARB 1 r8 +// SARB imm8 r8 +// SARB cl r8 +// SARB 1 m8 +// SARB imm8 m8 +// SARB cl m8 +// Construct and append a SARB instruction to the active function. +// Operates on the global context. +func SARB(ci, mr operand.Op) { ctx.SARB(ci, mr) } + +// SARL: Arithmetic Shift Right. +// +// Forms: +// +// SARL 1 r32 +// SARL imm8 r32 +// SARL cl r32 +// SARL 1 m32 +// SARL imm8 m32 +// SARL cl m32 +// Construct and append a SARL instruction to the active function. 
+func (c *Context) SARL(ci, mr operand.Op) { + if inst, err := x86.SARL(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SARL: Arithmetic Shift Right. +// +// Forms: +// +// SARL 1 r32 +// SARL imm8 r32 +// SARL cl r32 +// SARL 1 m32 +// SARL imm8 m32 +// SARL cl m32 +// Construct and append a SARL instruction to the active function. +// Operates on the global context. +func SARL(ci, mr operand.Op) { ctx.SARL(ci, mr) } + +// SARQ: Arithmetic Shift Right. +// +// Forms: +// +// SARQ 1 r64 +// SARQ imm8 r64 +// SARQ cl r64 +// SARQ 1 m64 +// SARQ imm8 m64 +// SARQ cl m64 +// Construct and append a SARQ instruction to the active function. +func (c *Context) SARQ(ci, mr operand.Op) { + if inst, err := x86.SARQ(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SARQ: Arithmetic Shift Right. +// +// Forms: +// +// SARQ 1 r64 +// SARQ imm8 r64 +// SARQ cl r64 +// SARQ 1 m64 +// SARQ imm8 m64 +// SARQ cl m64 +// Construct and append a SARQ instruction to the active function. +// Operates on the global context. +func SARQ(ci, mr operand.Op) { ctx.SARQ(ci, mr) } + +// SARW: Arithmetic Shift Right. +// +// Forms: +// +// SARW 1 r16 +// SARW imm8 r16 +// SARW cl r16 +// SARW 1 m16 +// SARW imm8 m16 +// SARW cl m16 +// Construct and append a SARW instruction to the active function. +func (c *Context) SARW(ci, mr operand.Op) { + if inst, err := x86.SARW(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SARW: Arithmetic Shift Right. +// +// Forms: +// +// SARW 1 r16 +// SARW imm8 r16 +// SARW cl r16 +// SARW 1 m16 +// SARW imm8 m16 +// SARW cl m16 +// Construct and append a SARW instruction to the active function. +// Operates on the global context. +func SARW(ci, mr operand.Op) { ctx.SARW(ci, mr) } + +// SARXL: Arithmetic Shift Right Without Affecting Flags. +// +// Forms: +// +// SARXL r32 r32 r32 +// SARXL r32 m32 r32 +// Construct and append a SARXL instruction to the active function. +func (c *Context) SARXL(r, mr, r1 operand.Op) { + if inst, err := x86.SARXL(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SARXL: Arithmetic Shift Right Without Affecting Flags. +// +// Forms: +// +// SARXL r32 r32 r32 +// SARXL r32 m32 r32 +// Construct and append a SARXL instruction to the active function. +// Operates on the global context. +func SARXL(r, mr, r1 operand.Op) { ctx.SARXL(r, mr, r1) } + +// SARXQ: Arithmetic Shift Right Without Affecting Flags. +// +// Forms: +// +// SARXQ r64 r64 r64 +// SARXQ r64 m64 r64 +// Construct and append a SARXQ instruction to the active function. +func (c *Context) SARXQ(r, mr, r1 operand.Op) { + if inst, err := x86.SARXQ(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SARXQ: Arithmetic Shift Right Without Affecting Flags. +// +// Forms: +// +// SARXQ r64 r64 r64 +// SARXQ r64 m64 r64 +// Construct and append a SARXQ instruction to the active function. +// Operates on the global context. +func SARXQ(r, mr, r1 operand.Op) { ctx.SARXQ(r, mr, r1) } + +// SBBB: Subtract with Borrow. +// +// Forms: +// +// SBBB imm8 al +// SBBB imm8 r8 +// SBBB r8 r8 +// SBBB m8 r8 +// SBBB imm8 m8 +// SBBB r8 m8 +// Construct and append a SBBB instruction to the active function. +func (c *Context) SBBB(imr, amr operand.Op) { + if inst, err := x86.SBBB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SBBB: Subtract with Borrow. 
+// +// Forms: +// +// SBBB imm8 al +// SBBB imm8 r8 +// SBBB r8 r8 +// SBBB m8 r8 +// SBBB imm8 m8 +// SBBB r8 m8 +// Construct and append a SBBB instruction to the active function. +// Operates on the global context. +func SBBB(imr, amr operand.Op) { ctx.SBBB(imr, amr) } + +// SBBL: Subtract with Borrow. +// +// Forms: +// +// SBBL imm32 eax +// SBBL imm8 r32 +// SBBL imm32 r32 +// SBBL r32 r32 +// SBBL m32 r32 +// SBBL imm8 m32 +// SBBL imm32 m32 +// SBBL r32 m32 +// Construct and append a SBBL instruction to the active function. +func (c *Context) SBBL(imr, emr operand.Op) { + if inst, err := x86.SBBL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SBBL: Subtract with Borrow. +// +// Forms: +// +// SBBL imm32 eax +// SBBL imm8 r32 +// SBBL imm32 r32 +// SBBL r32 r32 +// SBBL m32 r32 +// SBBL imm8 m32 +// SBBL imm32 m32 +// SBBL r32 m32 +// Construct and append a SBBL instruction to the active function. +// Operates on the global context. +func SBBL(imr, emr operand.Op) { ctx.SBBL(imr, emr) } + +// SBBQ: Subtract with Borrow. +// +// Forms: +// +// SBBQ imm32 rax +// SBBQ imm8 r64 +// SBBQ imm32 r64 +// SBBQ r64 r64 +// SBBQ m64 r64 +// SBBQ imm8 m64 +// SBBQ imm32 m64 +// SBBQ r64 m64 +// Construct and append a SBBQ instruction to the active function. +func (c *Context) SBBQ(imr, mr operand.Op) { + if inst, err := x86.SBBQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SBBQ: Subtract with Borrow. +// +// Forms: +// +// SBBQ imm32 rax +// SBBQ imm8 r64 +// SBBQ imm32 r64 +// SBBQ r64 r64 +// SBBQ m64 r64 +// SBBQ imm8 m64 +// SBBQ imm32 m64 +// SBBQ r64 m64 +// Construct and append a SBBQ instruction to the active function. +// Operates on the global context. +func SBBQ(imr, mr operand.Op) { ctx.SBBQ(imr, mr) } + +// SBBW: Subtract with Borrow. +// +// Forms: +// +// SBBW imm16 ax +// SBBW imm8 r16 +// SBBW imm16 r16 +// SBBW r16 r16 +// SBBW m16 r16 +// SBBW imm8 m16 +// SBBW imm16 m16 +// SBBW r16 m16 +// Construct and append a SBBW instruction to the active function. +func (c *Context) SBBW(imr, amr operand.Op) { + if inst, err := x86.SBBW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SBBW: Subtract with Borrow. +// +// Forms: +// +// SBBW imm16 ax +// SBBW imm8 r16 +// SBBW imm16 r16 +// SBBW r16 r16 +// SBBW m16 r16 +// SBBW imm8 m16 +// SBBW imm16 m16 +// SBBW r16 m16 +// Construct and append a SBBW instruction to the active function. +// Operates on the global context. +func SBBW(imr, amr operand.Op) { ctx.SBBW(imr, amr) } + +// SETCC: Set byte if above or equal (CF == 0). +// +// Forms: +// +// SETCC r8 +// SETCC m8 +// Construct and append a SETCC instruction to the active function. +func (c *Context) SETCC(mr operand.Op) { + if inst, err := x86.SETCC(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETCC: Set byte if above or equal (CF == 0). +// +// Forms: +// +// SETCC r8 +// SETCC m8 +// Construct and append a SETCC instruction to the active function. +// Operates on the global context. +func SETCC(mr operand.Op) { ctx.SETCC(mr) } + +// SETCS: Set byte if below (CF == 1). +// +// Forms: +// +// SETCS r8 +// SETCS m8 +// Construct and append a SETCS instruction to the active function. +func (c *Context) SETCS(mr operand.Op) { + if inst, err := x86.SETCS(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETCS: Set byte if below (CF == 1). 
+// +// Forms: +// +// SETCS r8 +// SETCS m8 +// Construct and append a SETCS instruction to the active function. +// Operates on the global context. +func SETCS(mr operand.Op) { ctx.SETCS(mr) } + +// SETEQ: Set byte if equal (ZF == 1). +// +// Forms: +// +// SETEQ r8 +// SETEQ m8 +// Construct and append a SETEQ instruction to the active function. +func (c *Context) SETEQ(mr operand.Op) { + if inst, err := x86.SETEQ(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETEQ: Set byte if equal (ZF == 1). +// +// Forms: +// +// SETEQ r8 +// SETEQ m8 +// Construct and append a SETEQ instruction to the active function. +// Operates on the global context. +func SETEQ(mr operand.Op) { ctx.SETEQ(mr) } + +// SETGE: Set byte if greater or equal (SF == OF). +// +// Forms: +// +// SETGE r8 +// SETGE m8 +// Construct and append a SETGE instruction to the active function. +func (c *Context) SETGE(mr operand.Op) { + if inst, err := x86.SETGE(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETGE: Set byte if greater or equal (SF == OF). +// +// Forms: +// +// SETGE r8 +// SETGE m8 +// Construct and append a SETGE instruction to the active function. +// Operates on the global context. +func SETGE(mr operand.Op) { ctx.SETGE(mr) } + +// SETGT: Set byte if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// SETGT r8 +// SETGT m8 +// Construct and append a SETGT instruction to the active function. +func (c *Context) SETGT(mr operand.Op) { + if inst, err := x86.SETGT(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETGT: Set byte if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// SETGT r8 +// SETGT m8 +// Construct and append a SETGT instruction to the active function. +// Operates on the global context. +func SETGT(mr operand.Op) { ctx.SETGT(mr) } + +// SETHI: Set byte if above (CF == 0 and ZF == 0). +// +// Forms: +// +// SETHI r8 +// SETHI m8 +// Construct and append a SETHI instruction to the active function. +func (c *Context) SETHI(mr operand.Op) { + if inst, err := x86.SETHI(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETHI: Set byte if above (CF == 0 and ZF == 0). +// +// Forms: +// +// SETHI r8 +// SETHI m8 +// Construct and append a SETHI instruction to the active function. +// Operates on the global context. +func SETHI(mr operand.Op) { ctx.SETHI(mr) } + +// SETLE: Set byte if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// SETLE r8 +// SETLE m8 +// Construct and append a SETLE instruction to the active function. +func (c *Context) SETLE(mr operand.Op) { + if inst, err := x86.SETLE(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETLE: Set byte if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// SETLE r8 +// SETLE m8 +// Construct and append a SETLE instruction to the active function. +// Operates on the global context. +func SETLE(mr operand.Op) { ctx.SETLE(mr) } + +// SETLS: Set byte if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// SETLS r8 +// SETLS m8 +// Construct and append a SETLS instruction to the active function. +func (c *Context) SETLS(mr operand.Op) { + if inst, err := x86.SETLS(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETLS: Set byte if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// SETLS r8 +// SETLS m8 +// Construct and append a SETLS instruction to the active function. +// Operates on the global context. 
+func SETLS(mr operand.Op) { ctx.SETLS(mr) } + +// SETLT: Set byte if less (SF != OF). +// +// Forms: +// +// SETLT r8 +// SETLT m8 +// Construct and append a SETLT instruction to the active function. +func (c *Context) SETLT(mr operand.Op) { + if inst, err := x86.SETLT(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETLT: Set byte if less (SF != OF). +// +// Forms: +// +// SETLT r8 +// SETLT m8 +// Construct and append a SETLT instruction to the active function. +// Operates on the global context. +func SETLT(mr operand.Op) { ctx.SETLT(mr) } + +// SETMI: Set byte if sign (SF == 1). +// +// Forms: +// +// SETMI r8 +// SETMI m8 +// Construct and append a SETMI instruction to the active function. +func (c *Context) SETMI(mr operand.Op) { + if inst, err := x86.SETMI(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETMI: Set byte if sign (SF == 1). +// +// Forms: +// +// SETMI r8 +// SETMI m8 +// Construct and append a SETMI instruction to the active function. +// Operates on the global context. +func SETMI(mr operand.Op) { ctx.SETMI(mr) } + +// SETNE: Set byte if not equal (ZF == 0). +// +// Forms: +// +// SETNE r8 +// SETNE m8 +// Construct and append a SETNE instruction to the active function. +func (c *Context) SETNE(mr operand.Op) { + if inst, err := x86.SETNE(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETNE: Set byte if not equal (ZF == 0). +// +// Forms: +// +// SETNE r8 +// SETNE m8 +// Construct and append a SETNE instruction to the active function. +// Operates on the global context. +func SETNE(mr operand.Op) { ctx.SETNE(mr) } + +// SETOC: Set byte if not overflow (OF == 0). +// +// Forms: +// +// SETOC r8 +// SETOC m8 +// Construct and append a SETOC instruction to the active function. +func (c *Context) SETOC(mr operand.Op) { + if inst, err := x86.SETOC(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETOC: Set byte if not overflow (OF == 0). +// +// Forms: +// +// SETOC r8 +// SETOC m8 +// Construct and append a SETOC instruction to the active function. +// Operates on the global context. +func SETOC(mr operand.Op) { ctx.SETOC(mr) } + +// SETOS: Set byte if overflow (OF == 1). +// +// Forms: +// +// SETOS r8 +// SETOS m8 +// Construct and append a SETOS instruction to the active function. +func (c *Context) SETOS(mr operand.Op) { + if inst, err := x86.SETOS(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETOS: Set byte if overflow (OF == 1). +// +// Forms: +// +// SETOS r8 +// SETOS m8 +// Construct and append a SETOS instruction to the active function. +// Operates on the global context. +func SETOS(mr operand.Op) { ctx.SETOS(mr) } + +// SETPC: Set byte if not parity (PF == 0). +// +// Forms: +// +// SETPC r8 +// SETPC m8 +// Construct and append a SETPC instruction to the active function. +func (c *Context) SETPC(mr operand.Op) { + if inst, err := x86.SETPC(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETPC: Set byte if not parity (PF == 0). +// +// Forms: +// +// SETPC r8 +// SETPC m8 +// Construct and append a SETPC instruction to the active function. +// Operates on the global context. +func SETPC(mr operand.Op) { ctx.SETPC(mr) } + +// SETPL: Set byte if not sign (SF == 0). +// +// Forms: +// +// SETPL r8 +// SETPL m8 +// Construct and append a SETPL instruction to the active function. 
+func (c *Context) SETPL(mr operand.Op) { + if inst, err := x86.SETPL(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETPL: Set byte if not sign (SF == 0). +// +// Forms: +// +// SETPL r8 +// SETPL m8 +// Construct and append a SETPL instruction to the active function. +// Operates on the global context. +func SETPL(mr operand.Op) { ctx.SETPL(mr) } + +// SETPS: Set byte if parity (PF == 1). +// +// Forms: +// +// SETPS r8 +// SETPS m8 +// Construct and append a SETPS instruction to the active function. +func (c *Context) SETPS(mr operand.Op) { + if inst, err := x86.SETPS(mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SETPS: Set byte if parity (PF == 1). +// +// Forms: +// +// SETPS r8 +// SETPS m8 +// Construct and append a SETPS instruction to the active function. +// Operates on the global context. +func SETPS(mr operand.Op) { ctx.SETPS(mr) } + +// SFENCE: Store Fence. +// +// Forms: +// +// SFENCE +// Construct and append a SFENCE instruction to the active function. +func (c *Context) SFENCE() { + if inst, err := x86.SFENCE(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SFENCE: Store Fence. +// +// Forms: +// +// SFENCE +// Construct and append a SFENCE instruction to the active function. +// Operates on the global context. +func SFENCE() { ctx.SFENCE() } + +// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords. +// +// Forms: +// +// SHA1MSG1 xmm xmm +// SHA1MSG1 m128 xmm +// Construct and append a SHA1MSG1 instruction to the active function. +func (c *Context) SHA1MSG1(mx, x operand.Op) { + if inst, err := x86.SHA1MSG1(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords. +// +// Forms: +// +// SHA1MSG1 xmm xmm +// SHA1MSG1 m128 xmm +// Construct and append a SHA1MSG1 instruction to the active function. +// Operates on the global context. +func SHA1MSG1(mx, x operand.Op) { ctx.SHA1MSG1(mx, x) } + +// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords. +// +// Forms: +// +// SHA1MSG2 xmm xmm +// SHA1MSG2 m128 xmm +// Construct and append a SHA1MSG2 instruction to the active function. +func (c *Context) SHA1MSG2(mx, x operand.Op) { + if inst, err := x86.SHA1MSG2(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords. +// +// Forms: +// +// SHA1MSG2 xmm xmm +// SHA1MSG2 m128 xmm +// Construct and append a SHA1MSG2 instruction to the active function. +// Operates on the global context. +func SHA1MSG2(mx, x operand.Op) { ctx.SHA1MSG2(mx, x) } + +// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds. +// +// Forms: +// +// SHA1NEXTE xmm xmm +// SHA1NEXTE m128 xmm +// Construct and append a SHA1NEXTE instruction to the active function. +func (c *Context) SHA1NEXTE(mx, x operand.Op) { + if inst, err := x86.SHA1NEXTE(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds. +// +// Forms: +// +// SHA1NEXTE xmm xmm +// SHA1NEXTE m128 xmm +// Construct and append a SHA1NEXTE instruction to the active function. +// Operates on the global context. +func SHA1NEXTE(mx, x operand.Op) { ctx.SHA1NEXTE(mx, x) } + +// SHA1RNDS4: Perform Four Rounds of SHA1 Operation. 
+// +// Forms: +// +// SHA1RNDS4 imm2u xmm xmm +// SHA1RNDS4 imm2u m128 xmm +// Construct and append a SHA1RNDS4 instruction to the active function. +func (c *Context) SHA1RNDS4(i, mx, x operand.Op) { + if inst, err := x86.SHA1RNDS4(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA1RNDS4: Perform Four Rounds of SHA1 Operation. +// +// Forms: +// +// SHA1RNDS4 imm2u xmm xmm +// SHA1RNDS4 imm2u m128 xmm +// Construct and append a SHA1RNDS4 instruction to the active function. +// Operates on the global context. +func SHA1RNDS4(i, mx, x operand.Op) { ctx.SHA1RNDS4(i, mx, x) } + +// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords. +// +// Forms: +// +// SHA256MSG1 xmm xmm +// SHA256MSG1 m128 xmm +// Construct and append a SHA256MSG1 instruction to the active function. +func (c *Context) SHA256MSG1(mx, x operand.Op) { + if inst, err := x86.SHA256MSG1(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords. +// +// Forms: +// +// SHA256MSG1 xmm xmm +// SHA256MSG1 m128 xmm +// Construct and append a SHA256MSG1 instruction to the active function. +// Operates on the global context. +func SHA256MSG1(mx, x operand.Op) { ctx.SHA256MSG1(mx, x) } + +// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords. +// +// Forms: +// +// SHA256MSG2 xmm xmm +// SHA256MSG2 m128 xmm +// Construct and append a SHA256MSG2 instruction to the active function. +func (c *Context) SHA256MSG2(mx, x operand.Op) { + if inst, err := x86.SHA256MSG2(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords. +// +// Forms: +// +// SHA256MSG2 xmm xmm +// SHA256MSG2 m128 xmm +// Construct and append a SHA256MSG2 instruction to the active function. +// Operates on the global context. +func SHA256MSG2(mx, x operand.Op) { ctx.SHA256MSG2(mx, x) } + +// SHA256RNDS2: Perform Two Rounds of SHA256 Operation. +// +// Forms: +// +// SHA256RNDS2 xmm0 xmm xmm +// SHA256RNDS2 xmm0 m128 xmm +// Construct and append a SHA256RNDS2 instruction to the active function. +func (c *Context) SHA256RNDS2(x, mx, x1 operand.Op) { + if inst, err := x86.SHA256RNDS2(x, mx, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHA256RNDS2: Perform Two Rounds of SHA256 Operation. +// +// Forms: +// +// SHA256RNDS2 xmm0 xmm xmm +// SHA256RNDS2 xmm0 m128 xmm +// Construct and append a SHA256RNDS2 instruction to the active function. +// Operates on the global context. +func SHA256RNDS2(x, mx, x1 operand.Op) { ctx.SHA256RNDS2(x, mx, x1) } + +// SHLB: Logical Shift Left. +// +// Forms: +// +// SHLB 1 r8 +// SHLB imm8 r8 +// SHLB cl r8 +// SHLB 1 m8 +// SHLB imm8 m8 +// SHLB cl m8 +// Construct and append a SHLB instruction to the active function. +func (c *Context) SHLB(ci, mr operand.Op) { + if inst, err := x86.SHLB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHLB: Logical Shift Left. +// +// Forms: +// +// SHLB 1 r8 +// SHLB imm8 r8 +// SHLB cl r8 +// SHLB 1 m8 +// SHLB imm8 m8 +// SHLB cl m8 +// Construct and append a SHLB instruction to the active function. +// Operates on the global context. +func SHLB(ci, mr operand.Op) { ctx.SHLB(ci, mr) } + +// SHLL: Logical Shift Left. 
+// +// Forms: +// +// SHLL 1 r32 +// SHLL imm8 r32 +// SHLL cl r32 +// SHLL 1 m32 +// SHLL imm8 m32 +// SHLL cl m32 +// SHLL imm8 r32 r32 +// SHLL cl r32 r32 +// SHLL imm8 r32 m32 +// SHLL cl r32 m32 +// Construct and append a SHLL instruction to the active function. +func (c *Context) SHLL(ops ...operand.Op) { + if inst, err := x86.SHLL(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHLL: Logical Shift Left. +// +// Forms: +// +// SHLL 1 r32 +// SHLL imm8 r32 +// SHLL cl r32 +// SHLL 1 m32 +// SHLL imm8 m32 +// SHLL cl m32 +// SHLL imm8 r32 r32 +// SHLL cl r32 r32 +// SHLL imm8 r32 m32 +// SHLL cl r32 m32 +// Construct and append a SHLL instruction to the active function. +// Operates on the global context. +func SHLL(ops ...operand.Op) { ctx.SHLL(ops...) } + +// SHLQ: Logical Shift Left. +// +// Forms: +// +// SHLQ 1 r64 +// SHLQ imm8 r64 +// SHLQ cl r64 +// SHLQ 1 m64 +// SHLQ imm8 m64 +// SHLQ cl m64 +// SHLQ imm8 r64 r64 +// SHLQ cl r64 r64 +// SHLQ imm8 r64 m64 +// SHLQ cl r64 m64 +// Construct and append a SHLQ instruction to the active function. +func (c *Context) SHLQ(ops ...operand.Op) { + if inst, err := x86.SHLQ(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHLQ: Logical Shift Left. +// +// Forms: +// +// SHLQ 1 r64 +// SHLQ imm8 r64 +// SHLQ cl r64 +// SHLQ 1 m64 +// SHLQ imm8 m64 +// SHLQ cl m64 +// SHLQ imm8 r64 r64 +// SHLQ cl r64 r64 +// SHLQ imm8 r64 m64 +// SHLQ cl r64 m64 +// Construct and append a SHLQ instruction to the active function. +// Operates on the global context. +func SHLQ(ops ...operand.Op) { ctx.SHLQ(ops...) } + +// SHLW: Logical Shift Left. +// +// Forms: +// +// SHLW 1 r16 +// SHLW imm8 r16 +// SHLW cl r16 +// SHLW 1 m16 +// SHLW imm8 m16 +// SHLW cl m16 +// SHLW imm8 r16 r16 +// SHLW cl r16 r16 +// SHLW imm8 r16 m16 +// SHLW cl r16 m16 +// Construct and append a SHLW instruction to the active function. +func (c *Context) SHLW(ops ...operand.Op) { + if inst, err := x86.SHLW(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHLW: Logical Shift Left. +// +// Forms: +// +// SHLW 1 r16 +// SHLW imm8 r16 +// SHLW cl r16 +// SHLW 1 m16 +// SHLW imm8 m16 +// SHLW cl m16 +// SHLW imm8 r16 r16 +// SHLW cl r16 r16 +// SHLW imm8 r16 m16 +// SHLW cl r16 m16 +// Construct and append a SHLW instruction to the active function. +// Operates on the global context. +func SHLW(ops ...operand.Op) { ctx.SHLW(ops...) } + +// SHLXL: Logical Shift Left Without Affecting Flags. +// +// Forms: +// +// SHLXL r32 r32 r32 +// SHLXL r32 m32 r32 +// Construct and append a SHLXL instruction to the active function. +func (c *Context) SHLXL(r, mr, r1 operand.Op) { + if inst, err := x86.SHLXL(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHLXL: Logical Shift Left Without Affecting Flags. +// +// Forms: +// +// SHLXL r32 r32 r32 +// SHLXL r32 m32 r32 +// Construct and append a SHLXL instruction to the active function. +// Operates on the global context. +func SHLXL(r, mr, r1 operand.Op) { ctx.SHLXL(r, mr, r1) } + +// SHLXQ: Logical Shift Left Without Affecting Flags. +// +// Forms: +// +// SHLXQ r64 r64 r64 +// SHLXQ r64 m64 r64 +// Construct and append a SHLXQ instruction to the active function. +func (c *Context) SHLXQ(r, mr, r1 operand.Op) { + if inst, err := x86.SHLXQ(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHLXQ: Logical Shift Left Without Affecting Flags. 
+// +// Forms: +// +// SHLXQ r64 r64 r64 +// SHLXQ r64 m64 r64 +// Construct and append a SHLXQ instruction to the active function. +// Operates on the global context. +func SHLXQ(r, mr, r1 operand.Op) { ctx.SHLXQ(r, mr, r1) } + +// SHRB: Logical Shift Right. +// +// Forms: +// +// SHRB 1 r8 +// SHRB imm8 r8 +// SHRB cl r8 +// SHRB 1 m8 +// SHRB imm8 m8 +// SHRB cl m8 +// Construct and append a SHRB instruction to the active function. +func (c *Context) SHRB(ci, mr operand.Op) { + if inst, err := x86.SHRB(ci, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHRB: Logical Shift Right. +// +// Forms: +// +// SHRB 1 r8 +// SHRB imm8 r8 +// SHRB cl r8 +// SHRB 1 m8 +// SHRB imm8 m8 +// SHRB cl m8 +// Construct and append a SHRB instruction to the active function. +// Operates on the global context. +func SHRB(ci, mr operand.Op) { ctx.SHRB(ci, mr) } + +// SHRL: Logical Shift Right. +// +// Forms: +// +// SHRL 1 r32 +// SHRL imm8 r32 +// SHRL cl r32 +// SHRL 1 m32 +// SHRL imm8 m32 +// SHRL cl m32 +// SHRL imm8 r32 r32 +// SHRL cl r32 r32 +// SHRL imm8 r32 m32 +// SHRL cl r32 m32 +// Construct and append a SHRL instruction to the active function. +func (c *Context) SHRL(ops ...operand.Op) { + if inst, err := x86.SHRL(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHRL: Logical Shift Right. +// +// Forms: +// +// SHRL 1 r32 +// SHRL imm8 r32 +// SHRL cl r32 +// SHRL 1 m32 +// SHRL imm8 m32 +// SHRL cl m32 +// SHRL imm8 r32 r32 +// SHRL cl r32 r32 +// SHRL imm8 r32 m32 +// SHRL cl r32 m32 +// Construct and append a SHRL instruction to the active function. +// Operates on the global context. +func SHRL(ops ...operand.Op) { ctx.SHRL(ops...) } + +// SHRQ: Logical Shift Right. +// +// Forms: +// +// SHRQ 1 r64 +// SHRQ imm8 r64 +// SHRQ cl r64 +// SHRQ 1 m64 +// SHRQ imm8 m64 +// SHRQ cl m64 +// SHRQ imm8 r64 r64 +// SHRQ cl r64 r64 +// SHRQ imm8 r64 m64 +// SHRQ cl r64 m64 +// Construct and append a SHRQ instruction to the active function. +func (c *Context) SHRQ(ops ...operand.Op) { + if inst, err := x86.SHRQ(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHRQ: Logical Shift Right. +// +// Forms: +// +// SHRQ 1 r64 +// SHRQ imm8 r64 +// SHRQ cl r64 +// SHRQ 1 m64 +// SHRQ imm8 m64 +// SHRQ cl m64 +// SHRQ imm8 r64 r64 +// SHRQ cl r64 r64 +// SHRQ imm8 r64 m64 +// SHRQ cl r64 m64 +// Construct and append a SHRQ instruction to the active function. +// Operates on the global context. +func SHRQ(ops ...operand.Op) { ctx.SHRQ(ops...) } + +// SHRW: Logical Shift Right. +// +// Forms: +// +// SHRW 1 r16 +// SHRW imm8 r16 +// SHRW cl r16 +// SHRW 1 m16 +// SHRW imm8 m16 +// SHRW cl m16 +// SHRW imm8 r16 r16 +// SHRW cl r16 r16 +// SHRW imm8 r16 m16 +// SHRW cl r16 m16 +// Construct and append a SHRW instruction to the active function. +func (c *Context) SHRW(ops ...operand.Op) { + if inst, err := x86.SHRW(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHRW: Logical Shift Right. +// +// Forms: +// +// SHRW 1 r16 +// SHRW imm8 r16 +// SHRW cl r16 +// SHRW 1 m16 +// SHRW imm8 m16 +// SHRW cl m16 +// SHRW imm8 r16 r16 +// SHRW cl r16 r16 +// SHRW imm8 r16 m16 +// SHRW cl r16 m16 +// Construct and append a SHRW instruction to the active function. +// Operates on the global context. +func SHRW(ops ...operand.Op) { ctx.SHRW(ops...) } + +// SHRXL: Logical Shift Right Without Affecting Flags. 
+// +// Forms: +// +// SHRXL r32 r32 r32 +// SHRXL r32 m32 r32 +// Construct and append a SHRXL instruction to the active function. +func (c *Context) SHRXL(r, mr, r1 operand.Op) { + if inst, err := x86.SHRXL(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHRXL: Logical Shift Right Without Affecting Flags. +// +// Forms: +// +// SHRXL r32 r32 r32 +// SHRXL r32 m32 r32 +// Construct and append a SHRXL instruction to the active function. +// Operates on the global context. +func SHRXL(r, mr, r1 operand.Op) { ctx.SHRXL(r, mr, r1) } + +// SHRXQ: Logical Shift Right Without Affecting Flags. +// +// Forms: +// +// SHRXQ r64 r64 r64 +// SHRXQ r64 m64 r64 +// Construct and append a SHRXQ instruction to the active function. +func (c *Context) SHRXQ(r, mr, r1 operand.Op) { + if inst, err := x86.SHRXQ(r, mr, r1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHRXQ: Logical Shift Right Without Affecting Flags. +// +// Forms: +// +// SHRXQ r64 r64 r64 +// SHRXQ r64 m64 r64 +// Construct and append a SHRXQ instruction to the active function. +// Operates on the global context. +func SHRXQ(r, mr, r1 operand.Op) { ctx.SHRXQ(r, mr, r1) } + +// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SHUFPD imm8 xmm xmm +// SHUFPD imm8 m128 xmm +// Construct and append a SHUFPD instruction to the active function. +func (c *Context) SHUFPD(i, mx, x operand.Op) { + if inst, err := x86.SHUFPD(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SHUFPD imm8 xmm xmm +// SHUFPD imm8 m128 xmm +// Construct and append a SHUFPD instruction to the active function. +// Operates on the global context. +func SHUFPD(i, mx, x operand.Op) { ctx.SHUFPD(i, mx, x) } + +// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SHUFPS imm8 xmm xmm +// SHUFPS imm8 m128 xmm +// Construct and append a SHUFPS instruction to the active function. +func (c *Context) SHUFPS(i, mx, x operand.Op) { + if inst, err := x86.SHUFPS(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SHUFPS imm8 xmm xmm +// SHUFPS imm8 m128 xmm +// Construct and append a SHUFPS instruction to the active function. +// Operates on the global context. +func SHUFPS(i, mx, x operand.Op) { ctx.SHUFPS(i, mx, x) } + +// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SQRTPD xmm xmm +// SQRTPD m128 xmm +// Construct and append a SQRTPD instruction to the active function. +func (c *Context) SQRTPD(mx, x operand.Op) { + if inst, err := x86.SQRTPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SQRTPD xmm xmm +// SQRTPD m128 xmm +// Construct and append a SQRTPD instruction to the active function. +// Operates on the global context. +func SQRTPD(mx, x operand.Op) { ctx.SQRTPD(mx, x) } + +// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SQRTPS xmm xmm +// SQRTPS m128 xmm +// Construct and append a SQRTPS instruction to the active function. 
+func (c *Context) SQRTPS(mx, x operand.Op) { + if inst, err := x86.SQRTPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SQRTPS xmm xmm +// SQRTPS m128 xmm +// Construct and append a SQRTPS instruction to the active function. +// Operates on the global context. +func SQRTPS(mx, x operand.Op) { ctx.SQRTPS(mx, x) } + +// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// SQRTSD xmm xmm +// SQRTSD m64 xmm +// Construct and append a SQRTSD instruction to the active function. +func (c *Context) SQRTSD(mx, x operand.Op) { + if inst, err := x86.SQRTSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// SQRTSD xmm xmm +// SQRTSD m64 xmm +// Construct and append a SQRTSD instruction to the active function. +// Operates on the global context. +func SQRTSD(mx, x operand.Op) { ctx.SQRTSD(mx, x) } + +// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// SQRTSS xmm xmm +// SQRTSS m32 xmm +// Construct and append a SQRTSS instruction to the active function. +func (c *Context) SQRTSS(mx, x operand.Op) { + if inst, err := x86.SQRTSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// SQRTSS xmm xmm +// SQRTSS m32 xmm +// Construct and append a SQRTSS instruction to the active function. +// Operates on the global context. +func SQRTSS(mx, x operand.Op) { ctx.SQRTSS(mx, x) } + +// STC: Set Carry Flag. +// +// Forms: +// +// STC +// Construct and append a STC instruction to the active function. +func (c *Context) STC() { + if inst, err := x86.STC(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// STC: Set Carry Flag. +// +// Forms: +// +// STC +// Construct and append a STC instruction to the active function. +// Operates on the global context. +func STC() { ctx.STC() } + +// STD: Set Direction Flag. +// +// Forms: +// +// STD +// Construct and append a STD instruction to the active function. +func (c *Context) STD() { + if inst, err := x86.STD(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// STD: Set Direction Flag. +// +// Forms: +// +// STD +// Construct and append a STD instruction to the active function. +// Operates on the global context. +func STD() { ctx.STD() } + +// STMXCSR: Store MXCSR Register State. +// +// Forms: +// +// STMXCSR m32 +// Construct and append a STMXCSR instruction to the active function. +func (c *Context) STMXCSR(m operand.Op) { + if inst, err := x86.STMXCSR(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// STMXCSR: Store MXCSR Register State. +// +// Forms: +// +// STMXCSR m32 +// Construct and append a STMXCSR instruction to the active function. +// Operates on the global context. +func STMXCSR(m operand.Op) { ctx.STMXCSR(m) } + +// SUBB: Subtract. +// +// Forms: +// +// SUBB imm8 al +// SUBB imm8 r8 +// SUBB r8 r8 +// SUBB m8 r8 +// SUBB imm8 m8 +// SUBB r8 m8 +// Construct and append a SUBB instruction to the active function. 
+func (c *Context) SUBB(imr, amr operand.Op) { + if inst, err := x86.SUBB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBB: Subtract. +// +// Forms: +// +// SUBB imm8 al +// SUBB imm8 r8 +// SUBB r8 r8 +// SUBB m8 r8 +// SUBB imm8 m8 +// SUBB r8 m8 +// Construct and append a SUBB instruction to the active function. +// Operates on the global context. +func SUBB(imr, amr operand.Op) { ctx.SUBB(imr, amr) } + +// SUBL: Subtract. +// +// Forms: +// +// SUBL imm32 eax +// SUBL imm8 r32 +// SUBL imm32 r32 +// SUBL r32 r32 +// SUBL m32 r32 +// SUBL imm8 m32 +// SUBL imm32 m32 +// SUBL r32 m32 +// Construct and append a SUBL instruction to the active function. +func (c *Context) SUBL(imr, emr operand.Op) { + if inst, err := x86.SUBL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBL: Subtract. +// +// Forms: +// +// SUBL imm32 eax +// SUBL imm8 r32 +// SUBL imm32 r32 +// SUBL r32 r32 +// SUBL m32 r32 +// SUBL imm8 m32 +// SUBL imm32 m32 +// SUBL r32 m32 +// Construct and append a SUBL instruction to the active function. +// Operates on the global context. +func SUBL(imr, emr operand.Op) { ctx.SUBL(imr, emr) } + +// SUBPD: Subtract Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SUBPD xmm xmm +// SUBPD m128 xmm +// Construct and append a SUBPD instruction to the active function. +func (c *Context) SUBPD(mx, x operand.Op) { + if inst, err := x86.SUBPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBPD: Subtract Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SUBPD xmm xmm +// SUBPD m128 xmm +// Construct and append a SUBPD instruction to the active function. +// Operates on the global context. +func SUBPD(mx, x operand.Op) { ctx.SUBPD(mx, x) } + +// SUBPS: Subtract Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SUBPS xmm xmm +// SUBPS m128 xmm +// Construct and append a SUBPS instruction to the active function. +func (c *Context) SUBPS(mx, x operand.Op) { + if inst, err := x86.SUBPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBPS: Subtract Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SUBPS xmm xmm +// SUBPS m128 xmm +// Construct and append a SUBPS instruction to the active function. +// Operates on the global context. +func SUBPS(mx, x operand.Op) { ctx.SUBPS(mx, x) } + +// SUBQ: Subtract. +// +// Forms: +// +// SUBQ imm32 rax +// SUBQ imm8 r64 +// SUBQ imm32 r64 +// SUBQ r64 r64 +// SUBQ m64 r64 +// SUBQ imm8 m64 +// SUBQ imm32 m64 +// SUBQ r64 m64 +// Construct and append a SUBQ instruction to the active function. +func (c *Context) SUBQ(imr, mr operand.Op) { + if inst, err := x86.SUBQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBQ: Subtract. +// +// Forms: +// +// SUBQ imm32 rax +// SUBQ imm8 r64 +// SUBQ imm32 r64 +// SUBQ r64 r64 +// SUBQ m64 r64 +// SUBQ imm8 m64 +// SUBQ imm32 m64 +// SUBQ r64 m64 +// Construct and append a SUBQ instruction to the active function. +// Operates on the global context. +func SUBQ(imr, mr operand.Op) { ctx.SUBQ(imr, mr) } + +// SUBSD: Subtract Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// SUBSD xmm xmm +// SUBSD m64 xmm +// Construct and append a SUBSD instruction to the active function. 
+func (c *Context) SUBSD(mx, x operand.Op) { + if inst, err := x86.SUBSD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBSD: Subtract Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// SUBSD xmm xmm +// SUBSD m64 xmm +// Construct and append a SUBSD instruction to the active function. +// Operates on the global context. +func SUBSD(mx, x operand.Op) { ctx.SUBSD(mx, x) } + +// SUBSS: Subtract Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// SUBSS xmm xmm +// SUBSS m32 xmm +// Construct and append a SUBSS instruction to the active function. +func (c *Context) SUBSS(mx, x operand.Op) { + if inst, err := x86.SUBSS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBSS: Subtract Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// SUBSS xmm xmm +// SUBSS m32 xmm +// Construct and append a SUBSS instruction to the active function. +// Operates on the global context. +func SUBSS(mx, x operand.Op) { ctx.SUBSS(mx, x) } + +// SUBW: Subtract. +// +// Forms: +// +// SUBW imm16 ax +// SUBW imm8 r16 +// SUBW imm16 r16 +// SUBW r16 r16 +// SUBW m16 r16 +// SUBW imm8 m16 +// SUBW imm16 m16 +// SUBW r16 m16 +// Construct and append a SUBW instruction to the active function. +func (c *Context) SUBW(imr, amr operand.Op) { + if inst, err := x86.SUBW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SUBW: Subtract. +// +// Forms: +// +// SUBW imm16 ax +// SUBW imm8 r16 +// SUBW imm16 r16 +// SUBW r16 r16 +// SUBW m16 r16 +// SUBW imm8 m16 +// SUBW imm16 m16 +// SUBW r16 m16 +// Construct and append a SUBW instruction to the active function. +// Operates on the global context. +func SUBW(imr, amr operand.Op) { ctx.SUBW(imr, amr) } + +// SYSCALL: Fast System Call. +// +// Forms: +// +// SYSCALL +// Construct and append a SYSCALL instruction to the active function. +func (c *Context) SYSCALL() { + if inst, err := x86.SYSCALL(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// SYSCALL: Fast System Call. +// +// Forms: +// +// SYSCALL +// Construct and append a SYSCALL instruction to the active function. +// Operates on the global context. +func SYSCALL() { ctx.SYSCALL() } + +// TESTB: Logical Compare. +// +// Forms: +// +// TESTB imm8 al +// TESTB imm8 r8 +// TESTB r8 r8 +// TESTB imm8 m8 +// TESTB r8 m8 +// Construct and append a TESTB instruction to the active function. +func (c *Context) TESTB(ir, amr operand.Op) { + if inst, err := x86.TESTB(ir, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TESTB: Logical Compare. +// +// Forms: +// +// TESTB imm8 al +// TESTB imm8 r8 +// TESTB r8 r8 +// TESTB imm8 m8 +// TESTB r8 m8 +// Construct and append a TESTB instruction to the active function. +// Operates on the global context. +func TESTB(ir, amr operand.Op) { ctx.TESTB(ir, amr) } + +// TESTL: Logical Compare. +// +// Forms: +// +// TESTL imm32 eax +// TESTL imm32 r32 +// TESTL r32 r32 +// TESTL imm32 m32 +// TESTL r32 m32 +// Construct and append a TESTL instruction to the active function. +func (c *Context) TESTL(ir, emr operand.Op) { + if inst, err := x86.TESTL(ir, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TESTL: Logical Compare. +// +// Forms: +// +// TESTL imm32 eax +// TESTL imm32 r32 +// TESTL r32 r32 +// TESTL imm32 m32 +// TESTL r32 m32 +// Construct and append a TESTL instruction to the active function. 
+// Operates on the global context. +func TESTL(ir, emr operand.Op) { ctx.TESTL(ir, emr) } + +// TESTQ: Logical Compare. +// +// Forms: +// +// TESTQ imm32 rax +// TESTQ imm32 r64 +// TESTQ r64 r64 +// TESTQ imm32 m64 +// TESTQ r64 m64 +// Construct and append a TESTQ instruction to the active function. +func (c *Context) TESTQ(ir, mr operand.Op) { + if inst, err := x86.TESTQ(ir, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TESTQ: Logical Compare. +// +// Forms: +// +// TESTQ imm32 rax +// TESTQ imm32 r64 +// TESTQ r64 r64 +// TESTQ imm32 m64 +// TESTQ r64 m64 +// Construct and append a TESTQ instruction to the active function. +// Operates on the global context. +func TESTQ(ir, mr operand.Op) { ctx.TESTQ(ir, mr) } + +// TESTW: Logical Compare. +// +// Forms: +// +// TESTW imm16 ax +// TESTW imm16 r16 +// TESTW r16 r16 +// TESTW imm16 m16 +// TESTW r16 m16 +// Construct and append a TESTW instruction to the active function. +func (c *Context) TESTW(ir, amr operand.Op) { + if inst, err := x86.TESTW(ir, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TESTW: Logical Compare. +// +// Forms: +// +// TESTW imm16 ax +// TESTW imm16 r16 +// TESTW r16 r16 +// TESTW imm16 m16 +// TESTW r16 m16 +// Construct and append a TESTW instruction to the active function. +// Operates on the global context. +func TESTW(ir, amr operand.Op) { ctx.TESTW(ir, amr) } + +// TZCNTL: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTL r32 r32 +// TZCNTL m32 r32 +// Construct and append a TZCNTL instruction to the active function. +func (c *Context) TZCNTL(mr, r operand.Op) { + if inst, err := x86.TZCNTL(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TZCNTL: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTL r32 r32 +// TZCNTL m32 r32 +// Construct and append a TZCNTL instruction to the active function. +// Operates on the global context. +func TZCNTL(mr, r operand.Op) { ctx.TZCNTL(mr, r) } + +// TZCNTQ: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTQ r64 r64 +// TZCNTQ m64 r64 +// Construct and append a TZCNTQ instruction to the active function. +func (c *Context) TZCNTQ(mr, r operand.Op) { + if inst, err := x86.TZCNTQ(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TZCNTQ: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTQ r64 r64 +// TZCNTQ m64 r64 +// Construct and append a TZCNTQ instruction to the active function. +// Operates on the global context. +func TZCNTQ(mr, r operand.Op) { ctx.TZCNTQ(mr, r) } + +// TZCNTW: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTW r16 r16 +// TZCNTW m16 r16 +// Construct and append a TZCNTW instruction to the active function. +func (c *Context) TZCNTW(mr, r operand.Op) { + if inst, err := x86.TZCNTW(mr, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// TZCNTW: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTW r16 r16 +// TZCNTW m16 r16 +// Construct and append a TZCNTW instruction to the active function. +// Operates on the global context. +func TZCNTW(mr, r operand.Op) { ctx.TZCNTW(mr, r) } + +// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// UCOMISD xmm xmm +// UCOMISD m64 xmm +// Construct and append a UCOMISD instruction to the active function. 
+func (c *Context) UCOMISD(mx, x operand.Op) { + if inst, err := x86.UCOMISD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// UCOMISD xmm xmm +// UCOMISD m64 xmm +// Construct and append a UCOMISD instruction to the active function. +// Operates on the global context. +func UCOMISD(mx, x operand.Op) { ctx.UCOMISD(mx, x) } + +// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// UCOMISS xmm xmm +// UCOMISS m32 xmm +// Construct and append a UCOMISS instruction to the active function. +func (c *Context) UCOMISS(mx, x operand.Op) { + if inst, err := x86.UCOMISS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// UCOMISS xmm xmm +// UCOMISS m32 xmm +// Construct and append a UCOMISS instruction to the active function. +// Operates on the global context. +func UCOMISS(mx, x operand.Op) { ctx.UCOMISS(mx, x) } + +// UD2: Undefined Instruction. +// +// Forms: +// +// UD2 +// Construct and append a UD2 instruction to the active function. +func (c *Context) UD2() { + if inst, err := x86.UD2(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UD2: Undefined Instruction. +// +// Forms: +// +// UD2 +// Construct and append a UD2 instruction to the active function. +// Operates on the global context. +func UD2() { ctx.UD2() } + +// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKHPD xmm xmm +// UNPCKHPD m128 xmm +// Construct and append a UNPCKHPD instruction to the active function. +func (c *Context) UNPCKHPD(mx, x operand.Op) { + if inst, err := x86.UNPCKHPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKHPD xmm xmm +// UNPCKHPD m128 xmm +// Construct and append a UNPCKHPD instruction to the active function. +// Operates on the global context. +func UNPCKHPD(mx, x operand.Op) { ctx.UNPCKHPD(mx, x) } + +// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKHPS xmm xmm +// UNPCKHPS m128 xmm +// Construct and append a UNPCKHPS instruction to the active function. +func (c *Context) UNPCKHPS(mx, x operand.Op) { + if inst, err := x86.UNPCKHPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKHPS xmm xmm +// UNPCKHPS m128 xmm +// Construct and append a UNPCKHPS instruction to the active function. +// Operates on the global context. +func UNPCKHPS(mx, x operand.Op) { ctx.UNPCKHPS(mx, x) } + +// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKLPD xmm xmm +// UNPCKLPD m128 xmm +// Construct and append a UNPCKLPD instruction to the active function. +func (c *Context) UNPCKLPD(mx, x operand.Op) { + if inst, err := x86.UNPCKLPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// UNPCKLPD xmm xmm +// UNPCKLPD m128 xmm +// Construct and append a UNPCKLPD instruction to the active function. +// Operates on the global context. +func UNPCKLPD(mx, x operand.Op) { ctx.UNPCKLPD(mx, x) } + +// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKLPS xmm xmm +// UNPCKLPS m128 xmm +// Construct and append a UNPCKLPS instruction to the active function. +func (c *Context) UNPCKLPS(mx, x operand.Op) { + if inst, err := x86.UNPCKLPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKLPS xmm xmm +// UNPCKLPS m128 xmm +// Construct and append a UNPCKLPS instruction to the active function. +// Operates on the global context. +func UNPCKLPS(mx, x operand.Op) { ctx.UNPCKLPS(mx, x) } + +// VADDPD: Add Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VADDPD xmm xmm xmm +// VADDPD m128 xmm xmm +// VADDPD ymm ymm ymm +// VADDPD m256 ymm ymm +// Construct and append a VADDPD instruction to the active function. +func (c *Context) VADDPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VADDPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VADDPD: Add Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VADDPD xmm xmm xmm +// VADDPD m128 xmm xmm +// VADDPD ymm ymm ymm +// VADDPD m256 ymm ymm +// Construct and append a VADDPD instruction to the active function. +// Operates on the global context. +func VADDPD(mxy, xy, xy1 operand.Op) { ctx.VADDPD(mxy, xy, xy1) } + +// VADDPS: Add Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VADDPS xmm xmm xmm +// VADDPS m128 xmm xmm +// VADDPS ymm ymm ymm +// VADDPS m256 ymm ymm +// Construct and append a VADDPS instruction to the active function. +func (c *Context) VADDPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VADDPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VADDPS: Add Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VADDPS xmm xmm xmm +// VADDPS m128 xmm xmm +// VADDPS ymm ymm ymm +// VADDPS m256 ymm ymm +// Construct and append a VADDPS instruction to the active function. +// Operates on the global context. +func VADDPS(mxy, xy, xy1 operand.Op) { ctx.VADDPS(mxy, xy, xy1) } + +// VADDSD: Add Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VADDSD xmm xmm xmm +// VADDSD m64 xmm xmm +// Construct and append a VADDSD instruction to the active function. +func (c *Context) VADDSD(mx, x, x1 operand.Op) { + if inst, err := x86.VADDSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VADDSD: Add Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VADDSD xmm xmm xmm +// VADDSD m64 xmm xmm +// Construct and append a VADDSD instruction to the active function. +// Operates on the global context. +func VADDSD(mx, x, x1 operand.Op) { ctx.VADDSD(mx, x, x1) } + +// VADDSS: Add Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VADDSS xmm xmm xmm +// VADDSS m32 xmm xmm +// Construct and append a VADDSS instruction to the active function. +func (c *Context) VADDSS(mx, x, x1 operand.Op) { + if inst, err := x86.VADDSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VADDSS: Add Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VADDSS xmm xmm xmm +// VADDSS m32 xmm xmm +// Construct and append a VADDSS instruction to the active function. +// Operates on the global context. +func VADDSS(mx, x, x1 operand.Op) { ctx.VADDSS(mx, x, x1) } + +// VADDSUBPD: Packed Double-FP Add/Subtract. +// +// Forms: +// +// VADDSUBPD xmm xmm xmm +// VADDSUBPD m128 xmm xmm +// VADDSUBPD ymm ymm ymm +// VADDSUBPD m256 ymm ymm +// Construct and append a VADDSUBPD instruction to the active function. +func (c *Context) VADDSUBPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VADDSUBPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VADDSUBPD: Packed Double-FP Add/Subtract. +// +// Forms: +// +// VADDSUBPD xmm xmm xmm +// VADDSUBPD m128 xmm xmm +// VADDSUBPD ymm ymm ymm +// VADDSUBPD m256 ymm ymm +// Construct and append a VADDSUBPD instruction to the active function. +// Operates on the global context. +func VADDSUBPD(mxy, xy, xy1 operand.Op) { ctx.VADDSUBPD(mxy, xy, xy1) } + +// VADDSUBPS: Packed Single-FP Add/Subtract. +// +// Forms: +// +// VADDSUBPS xmm xmm xmm +// VADDSUBPS m128 xmm xmm +// VADDSUBPS ymm ymm ymm +// VADDSUBPS m256 ymm ymm +// Construct and append a VADDSUBPS instruction to the active function. +func (c *Context) VADDSUBPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VADDSUBPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VADDSUBPS: Packed Single-FP Add/Subtract. +// +// Forms: +// +// VADDSUBPS xmm xmm xmm +// VADDSUBPS m128 xmm xmm +// VADDSUBPS ymm ymm ymm +// VADDSUBPS m256 ymm ymm +// Construct and append a VADDSUBPS instruction to the active function. +// Operates on the global context. +func VADDSUBPS(mxy, xy, xy1 operand.Op) { ctx.VADDSUBPS(mxy, xy, xy1) } + +// VAESDEC: Perform One Round of an AES Decryption Flow. +// +// Forms: +// +// VAESDEC xmm xmm xmm +// VAESDEC m128 xmm xmm +// Construct and append a VAESDEC instruction to the active function. +func (c *Context) VAESDEC(mx, x, x1 operand.Op) { + if inst, err := x86.VAESDEC(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VAESDEC: Perform One Round of an AES Decryption Flow. +// +// Forms: +// +// VAESDEC xmm xmm xmm +// VAESDEC m128 xmm xmm +// Construct and append a VAESDEC instruction to the active function. +// Operates on the global context. +func VAESDEC(mx, x, x1 operand.Op) { ctx.VAESDEC(mx, x, x1) } + +// VAESDECLAST: Perform Last Round of an AES Decryption Flow. +// +// Forms: +// +// VAESDECLAST xmm xmm xmm +// VAESDECLAST m128 xmm xmm +// Construct and append a VAESDECLAST instruction to the active function. +func (c *Context) VAESDECLAST(mx, x, x1 operand.Op) { + if inst, err := x86.VAESDECLAST(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VAESDECLAST: Perform Last Round of an AES Decryption Flow. +// +// Forms: +// +// VAESDECLAST xmm xmm xmm +// VAESDECLAST m128 xmm xmm +// Construct and append a VAESDECLAST instruction to the active function. +// Operates on the global context. +func VAESDECLAST(mx, x, x1 operand.Op) { ctx.VAESDECLAST(mx, x, x1) } + +// VAESENC: Perform One Round of an AES Encryption Flow. +// +// Forms: +// +// VAESENC xmm xmm xmm +// VAESENC m128 xmm xmm +// Construct and append a VAESENC instruction to the active function. 
+func (c *Context) VAESENC(mx, x, x1 operand.Op) { + if inst, err := x86.VAESENC(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VAESENC: Perform One Round of an AES Encryption Flow. +// +// Forms: +// +// VAESENC xmm xmm xmm +// VAESENC m128 xmm xmm +// Construct and append a VAESENC instruction to the active function. +// Operates on the global context. +func VAESENC(mx, x, x1 operand.Op) { ctx.VAESENC(mx, x, x1) } + +// VAESENCLAST: Perform Last Round of an AES Encryption Flow. +// +// Forms: +// +// VAESENCLAST xmm xmm xmm +// VAESENCLAST m128 xmm xmm +// Construct and append a VAESENCLAST instruction to the active function. +func (c *Context) VAESENCLAST(mx, x, x1 operand.Op) { + if inst, err := x86.VAESENCLAST(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VAESENCLAST: Perform Last Round of an AES Encryption Flow. +// +// Forms: +// +// VAESENCLAST xmm xmm xmm +// VAESENCLAST m128 xmm xmm +// Construct and append a VAESENCLAST instruction to the active function. +// Operates on the global context. +func VAESENCLAST(mx, x, x1 operand.Op) { ctx.VAESENCLAST(mx, x, x1) } + +// VAESIMC: Perform the AES InvMixColumn Transformation. +// +// Forms: +// +// VAESIMC xmm xmm +// VAESIMC m128 xmm +// Construct and append a VAESIMC instruction to the active function. +func (c *Context) VAESIMC(mx, x operand.Op) { + if inst, err := x86.VAESIMC(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VAESIMC: Perform the AES InvMixColumn Transformation. +// +// Forms: +// +// VAESIMC xmm xmm +// VAESIMC m128 xmm +// Construct and append a VAESIMC instruction to the active function. +// Operates on the global context. +func VAESIMC(mx, x operand.Op) { ctx.VAESIMC(mx, x) } + +// VAESKEYGENASSIST: AES Round Key Generation Assist. +// +// Forms: +// +// VAESKEYGENASSIST imm8 xmm xmm +// VAESKEYGENASSIST imm8 m128 xmm +// Construct and append a VAESKEYGENASSIST instruction to the active function. +func (c *Context) VAESKEYGENASSIST(i, mx, x operand.Op) { + if inst, err := x86.VAESKEYGENASSIST(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VAESKEYGENASSIST: AES Round Key Generation Assist. +// +// Forms: +// +// VAESKEYGENASSIST imm8 xmm xmm +// VAESKEYGENASSIST imm8 m128 xmm +// Construct and append a VAESKEYGENASSIST instruction to the active function. +// Operates on the global context. +func VAESKEYGENASSIST(i, mx, x operand.Op) { ctx.VAESKEYGENASSIST(i, mx, x) } + +// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VANDNPD xmm xmm xmm +// VANDNPD m128 xmm xmm +// VANDNPD ymm ymm ymm +// VANDNPD m256 ymm ymm +// Construct and append a VANDNPD instruction to the active function. +func (c *Context) VANDNPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VANDNPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VANDNPD xmm xmm xmm +// VANDNPD m128 xmm xmm +// VANDNPD ymm ymm ymm +// VANDNPD m256 ymm ymm +// Construct and append a VANDNPD instruction to the active function. +// Operates on the global context. +func VANDNPD(mxy, xy, xy1 operand.Op) { ctx.VANDNPD(mxy, xy, xy1) } + +// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VANDNPS xmm xmm xmm +// VANDNPS m128 xmm xmm +// VANDNPS ymm ymm ymm +// VANDNPS m256 ymm ymm +// Construct and append a VANDNPS instruction to the active function. +func (c *Context) VANDNPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VANDNPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VANDNPS xmm xmm xmm +// VANDNPS m128 xmm xmm +// VANDNPS ymm ymm ymm +// VANDNPS m256 ymm ymm +// Construct and append a VANDNPS instruction to the active function. +// Operates on the global context. +func VANDNPS(mxy, xy, xy1 operand.Op) { ctx.VANDNPS(mxy, xy, xy1) } + +// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VANDPD xmm xmm xmm +// VANDPD m128 xmm xmm +// VANDPD ymm ymm ymm +// VANDPD m256 ymm ymm +// Construct and append a VANDPD instruction to the active function. +func (c *Context) VANDPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VANDPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VANDPD xmm xmm xmm +// VANDPD m128 xmm xmm +// VANDPD ymm ymm ymm +// VANDPD m256 ymm ymm +// Construct and append a VANDPD instruction to the active function. +// Operates on the global context. +func VANDPD(mxy, xy, xy1 operand.Op) { ctx.VANDPD(mxy, xy, xy1) } + +// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VANDPS xmm xmm xmm +// VANDPS m128 xmm xmm +// VANDPS ymm ymm ymm +// VANDPS m256 ymm ymm +// Construct and append a VANDPS instruction to the active function. +func (c *Context) VANDPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VANDPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VANDPS xmm xmm xmm +// VANDPS m128 xmm xmm +// VANDPS ymm ymm ymm +// VANDPS m256 ymm ymm +// Construct and append a VANDPS instruction to the active function. +// Operates on the global context. +func VANDPS(mxy, xy, xy1 operand.Op) { ctx.VANDPS(mxy, xy, xy1) } + +// VBLENDPD: Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VBLENDPD imm8 xmm xmm xmm +// VBLENDPD imm8 m128 xmm xmm +// VBLENDPD imm8 ymm ymm ymm +// VBLENDPD imm8 m256 ymm ymm +// Construct and append a VBLENDPD instruction to the active function. +func (c *Context) VBLENDPD(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VBLENDPD(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBLENDPD: Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VBLENDPD imm8 xmm xmm xmm +// VBLENDPD imm8 m128 xmm xmm +// VBLENDPD imm8 ymm ymm ymm +// VBLENDPD imm8 m256 ymm ymm +// Construct and append a VBLENDPD instruction to the active function. +// Operates on the global context. +func VBLENDPD(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPD(i, mxy, xy, xy1) } + +// VBLENDPS: Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VBLENDPS imm8 xmm xmm xmm +// VBLENDPS imm8 m128 xmm xmm +// VBLENDPS imm8 ymm ymm ymm +// VBLENDPS imm8 m256 ymm ymm +// Construct and append a VBLENDPS instruction to the active function. 
+func (c *Context) VBLENDPS(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VBLENDPS(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBLENDPS: Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VBLENDPS imm8 xmm xmm xmm +// VBLENDPS imm8 m128 xmm xmm +// VBLENDPS imm8 ymm ymm ymm +// VBLENDPS imm8 m256 ymm ymm +// Construct and append a VBLENDPS instruction to the active function. +// Operates on the global context. +func VBLENDPS(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPS(i, mxy, xy, xy1) } + +// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VBLENDVPD xmm xmm xmm xmm +// VBLENDVPD xmm m128 xmm xmm +// VBLENDVPD ymm ymm ymm ymm +// VBLENDVPD ymm m256 ymm ymm +// Construct and append a VBLENDVPD instruction to the active function. +func (c *Context) VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) { + if inst, err := x86.VBLENDVPD(xy, mxy, xy1, xy2); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VBLENDVPD xmm xmm xmm xmm +// VBLENDVPD xmm m128 xmm xmm +// VBLENDVPD ymm ymm ymm ymm +// VBLENDVPD ymm m256 ymm ymm +// Construct and append a VBLENDVPD instruction to the active function. +// Operates on the global context. +func VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) { ctx.VBLENDVPD(xy, mxy, xy1, xy2) } + +// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VBLENDVPS xmm xmm xmm xmm +// VBLENDVPS xmm m128 xmm xmm +// VBLENDVPS ymm ymm ymm ymm +// VBLENDVPS ymm m256 ymm ymm +// Construct and append a VBLENDVPS instruction to the active function. +func (c *Context) VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) { + if inst, err := x86.VBLENDVPS(xy, mxy, xy1, xy2); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VBLENDVPS xmm xmm xmm xmm +// VBLENDVPS xmm m128 xmm xmm +// VBLENDVPS ymm ymm ymm ymm +// VBLENDVPS ymm m256 ymm ymm +// Construct and append a VBLENDVPS instruction to the active function. +// Operates on the global context. +func VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) { ctx.VBLENDVPS(xy, mxy, xy1, xy2) } + +// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data. +// +// Forms: +// +// VBROADCASTF128 m128 ymm +// Construct and append a VBROADCASTF128 instruction to the active function. +func (c *Context) VBROADCASTF128(m, y operand.Op) { + if inst, err := x86.VBROADCASTF128(m, y); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data. +// +// Forms: +// +// VBROADCASTF128 m128 ymm +// Construct and append a VBROADCASTF128 instruction to the active function. +// Operates on the global context. +func VBROADCASTF128(m, y operand.Op) { ctx.VBROADCASTF128(m, y) } + +// VBROADCASTI128: Broadcast 128 Bits of Integer Data. +// +// Forms: +// +// VBROADCASTI128 m128 ymm +// Construct and append a VBROADCASTI128 instruction to the active function. +func (c *Context) VBROADCASTI128(m, y operand.Op) { + if inst, err := x86.VBROADCASTI128(m, y); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBROADCASTI128: Broadcast 128 Bits of Integer Data. +// +// Forms: +// +// VBROADCASTI128 m128 ymm +// Construct and append a VBROADCASTI128 instruction to the active function. 
+// Operates on the global context. +func VBROADCASTI128(m, y operand.Op) { ctx.VBROADCASTI128(m, y) } + +// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element. +// +// Forms: +// +// VBROADCASTSD xmm ymm +// VBROADCASTSD m64 ymm +// Construct and append a VBROADCASTSD instruction to the active function. +func (c *Context) VBROADCASTSD(mx, y operand.Op) { + if inst, err := x86.VBROADCASTSD(mx, y); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element. +// +// Forms: +// +// VBROADCASTSD xmm ymm +// VBROADCASTSD m64 ymm +// Construct and append a VBROADCASTSD instruction to the active function. +// Operates on the global context. +func VBROADCASTSD(mx, y operand.Op) { ctx.VBROADCASTSD(mx, y) } + +// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element. +// +// Forms: +// +// VBROADCASTSS xmm xmm +// VBROADCASTSS m32 xmm +// VBROADCASTSS xmm ymm +// VBROADCASTSS m32 ymm +// Construct and append a VBROADCASTSS instruction to the active function. +func (c *Context) VBROADCASTSS(mx, xy operand.Op) { + if inst, err := x86.VBROADCASTSS(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element. +// +// Forms: +// +// VBROADCASTSS xmm xmm +// VBROADCASTSS m32 xmm +// VBROADCASTSS xmm ymm +// VBROADCASTSS m32 ymm +// Construct and append a VBROADCASTSS instruction to the active function. +// Operates on the global context. +func VBROADCASTSS(mx, xy operand.Op) { ctx.VBROADCASTSS(mx, xy) } + +// VCMPPD: Compare Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VCMPPD imm8 xmm xmm xmm +// VCMPPD imm8 m128 xmm xmm +// VCMPPD imm8 ymm ymm ymm +// VCMPPD imm8 m256 ymm ymm +// Construct and append a VCMPPD instruction to the active function. +func (c *Context) VCMPPD(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VCMPPD(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCMPPD: Compare Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VCMPPD imm8 xmm xmm xmm +// VCMPPD imm8 m128 xmm xmm +// VCMPPD imm8 ymm ymm ymm +// VCMPPD imm8 m256 ymm ymm +// Construct and append a VCMPPD instruction to the active function. +// Operates on the global context. +func VCMPPD(i, mxy, xy, xy1 operand.Op) { ctx.VCMPPD(i, mxy, xy, xy1) } + +// VCMPPS: Compare Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VCMPPS imm8 xmm xmm xmm +// VCMPPS imm8 m128 xmm xmm +// VCMPPS imm8 ymm ymm ymm +// VCMPPS imm8 m256 ymm ymm +// Construct and append a VCMPPS instruction to the active function. +func (c *Context) VCMPPS(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VCMPPS(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCMPPS: Compare Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VCMPPS imm8 xmm xmm xmm +// VCMPPS imm8 m128 xmm xmm +// VCMPPS imm8 ymm ymm ymm +// VCMPPS imm8 m256 ymm ymm +// Construct and append a VCMPPS instruction to the active function. +// Operates on the global context. +func VCMPPS(i, mxy, xy, xy1 operand.Op) { ctx.VCMPPS(i, mxy, xy, xy1) } + +// VCMPSD: Compare Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VCMPSD imm8 xmm xmm xmm +// VCMPSD imm8 m64 xmm xmm +// Construct and append a VCMPSD instruction to the active function. 
+func (c *Context) VCMPSD(i, mx, x, x1 operand.Op) { + if inst, err := x86.VCMPSD(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCMPSD: Compare Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VCMPSD imm8 xmm xmm xmm +// VCMPSD imm8 m64 xmm xmm +// Construct and append a VCMPSD instruction to the active function. +// Operates on the global context. +func VCMPSD(i, mx, x, x1 operand.Op) { ctx.VCMPSD(i, mx, x, x1) } + +// VCMPSS: Compare Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VCMPSS imm8 xmm xmm xmm +// VCMPSS imm8 m32 xmm xmm +// Construct and append a VCMPSS instruction to the active function. +func (c *Context) VCMPSS(i, mx, x, x1 operand.Op) { + if inst, err := x86.VCMPSS(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCMPSS: Compare Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VCMPSS imm8 xmm xmm xmm +// VCMPSS imm8 m32 xmm xmm +// Construct and append a VCMPSS instruction to the active function. +// Operates on the global context. +func VCMPSS(i, mx, x, x1 operand.Op) { ctx.VCMPSS(i, mx, x, x1) } + +// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VCOMISD xmm xmm +// VCOMISD m64 xmm +// Construct and append a VCOMISD instruction to the active function. +func (c *Context) VCOMISD(mx, x operand.Op) { + if inst, err := x86.VCOMISD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VCOMISD xmm xmm +// VCOMISD m64 xmm +// Construct and append a VCOMISD instruction to the active function. +// Operates on the global context. +func VCOMISD(mx, x operand.Op) { ctx.VCOMISD(mx, x) } + +// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VCOMISS xmm xmm +// VCOMISS m32 xmm +// Construct and append a VCOMISS instruction to the active function. +func (c *Context) VCOMISS(mx, x operand.Op) { + if inst, err := x86.VCOMISS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VCOMISS xmm xmm +// VCOMISS m32 xmm +// Construct and append a VCOMISS instruction to the active function. +// Operates on the global context. +func VCOMISS(mx, x operand.Op) { ctx.VCOMISS(mx, x) } + +// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values. +// +// Forms: +// +// VCVTDQ2PD xmm xmm +// VCVTDQ2PD m64 xmm +// VCVTDQ2PD xmm ymm +// VCVTDQ2PD m128 ymm +// Construct and append a VCVTDQ2PD instruction to the active function. +func (c *Context) VCVTDQ2PD(mx, xy operand.Op) { + if inst, err := x86.VCVTDQ2PD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values. +// +// Forms: +// +// VCVTDQ2PD xmm xmm +// VCVTDQ2PD m64 xmm +// VCVTDQ2PD xmm ymm +// VCVTDQ2PD m128 ymm +// Construct and append a VCVTDQ2PD instruction to the active function. +// Operates on the global context. +func VCVTDQ2PD(mx, xy operand.Op) { ctx.VCVTDQ2PD(mx, xy) } + +// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values. 
+// +// Forms: +// +// VCVTDQ2PS xmm xmm +// VCVTDQ2PS m128 xmm +// VCVTDQ2PS ymm ymm +// VCVTDQ2PS m256 ymm +// Construct and append a VCVTDQ2PS instruction to the active function. +func (c *Context) VCVTDQ2PS(mxy, xy operand.Op) { + if inst, err := x86.VCVTDQ2PS(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values. +// +// Forms: +// +// VCVTDQ2PS xmm xmm +// VCVTDQ2PS m128 xmm +// VCVTDQ2PS ymm ymm +// VCVTDQ2PS m256 ymm +// Construct and append a VCVTDQ2PS instruction to the active function. +// Operates on the global context. +func VCVTDQ2PS(mxy, xy operand.Op) { ctx.VCVTDQ2PS(mxy, xy) } + +// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPD2DQX xmm xmm +// VCVTPD2DQX m128 xmm +// Construct and append a VCVTPD2DQX instruction to the active function. +func (c *Context) VCVTPD2DQX(mx, x operand.Op) { + if inst, err := x86.VCVTPD2DQX(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPD2DQX xmm xmm +// VCVTPD2DQX m128 xmm +// Construct and append a VCVTPD2DQX instruction to the active function. +// Operates on the global context. +func VCVTPD2DQX(mx, x operand.Op) { ctx.VCVTPD2DQX(mx, x) } + +// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPD2DQY ymm xmm +// VCVTPD2DQY m256 xmm +// Construct and append a VCVTPD2DQY instruction to the active function. +func (c *Context) VCVTPD2DQY(my, x operand.Op) { + if inst, err := x86.VCVTPD2DQY(my, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPD2DQY ymm xmm +// VCVTPD2DQY m256 xmm +// Construct and append a VCVTPD2DQY instruction to the active function. +// Operates on the global context. +func VCVTPD2DQY(my, x operand.Op) { ctx.VCVTPD2DQY(my, x) } + +// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// VCVTPD2PSX xmm xmm +// VCVTPD2PSX m128 xmm +// Construct and append a VCVTPD2PSX instruction to the active function. +func (c *Context) VCVTPD2PSX(mx, x operand.Op) { + if inst, err := x86.VCVTPD2PSX(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// VCVTPD2PSX xmm xmm +// VCVTPD2PSX m128 xmm +// Construct and append a VCVTPD2PSX instruction to the active function. +// Operates on the global context. +func VCVTPD2PSX(mx, x operand.Op) { ctx.VCVTPD2PSX(mx, x) } + +// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// VCVTPD2PSY ymm xmm +// VCVTPD2PSY m256 xmm +// Construct and append a VCVTPD2PSY instruction to the active function. +func (c *Context) VCVTPD2PSY(my, x operand.Op) { + if inst, err := x86.VCVTPD2PSY(my, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// VCVTPD2PSY ymm xmm +// VCVTPD2PSY m256 xmm +// Construct and append a VCVTPD2PSY instruction to the active function. +// Operates on the global context. 
+func VCVTPD2PSY(my, x operand.Op) { ctx.VCVTPD2PSY(my, x) } + +// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values. +// +// Forms: +// +// VCVTPH2PS xmm xmm +// VCVTPH2PS m64 xmm +// VCVTPH2PS xmm ymm +// VCVTPH2PS m128 ymm +// Construct and append a VCVTPH2PS instruction to the active function. +func (c *Context) VCVTPH2PS(mx, xy operand.Op) { + if inst, err := x86.VCVTPH2PS(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values. +// +// Forms: +// +// VCVTPH2PS xmm xmm +// VCVTPH2PS m64 xmm +// VCVTPH2PS xmm ymm +// VCVTPH2PS m128 ymm +// Construct and append a VCVTPH2PS instruction to the active function. +// Operates on the global context. +func VCVTPH2PS(mx, xy operand.Op) { ctx.VCVTPH2PS(mx, xy) } + +// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPS2DQ xmm xmm +// VCVTPS2DQ m128 xmm +// VCVTPS2DQ ymm ymm +// VCVTPS2DQ m256 ymm +// Construct and append a VCVTPS2DQ instruction to the active function. +func (c *Context) VCVTPS2DQ(mxy, xy operand.Op) { + if inst, err := x86.VCVTPS2DQ(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPS2DQ xmm xmm +// VCVTPS2DQ m128 xmm +// VCVTPS2DQ ymm ymm +// VCVTPS2DQ m256 ymm +// Construct and append a VCVTPS2DQ instruction to the active function. +// Operates on the global context. +func VCVTPS2DQ(mxy, xy operand.Op) { ctx.VCVTPS2DQ(mxy, xy) } + +// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values. +// +// Forms: +// +// VCVTPS2PD xmm xmm +// VCVTPS2PD m64 xmm +// VCVTPS2PD xmm ymm +// VCVTPS2PD m128 ymm +// Construct and append a VCVTPS2PD instruction to the active function. +func (c *Context) VCVTPS2PD(mx, xy operand.Op) { + if inst, err := x86.VCVTPS2PD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values. +// +// Forms: +// +// VCVTPS2PD xmm xmm +// VCVTPS2PD m64 xmm +// VCVTPS2PD xmm ymm +// VCVTPS2PD m128 ymm +// Construct and append a VCVTPS2PD instruction to the active function. +// Operates on the global context. +func VCVTPS2PD(mx, xy operand.Op) { ctx.VCVTPS2PD(mx, xy) } + +// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value. +// +// Forms: +// +// VCVTPS2PH imm8 xmm xmm +// VCVTPS2PH imm8 ymm xmm +// VCVTPS2PH imm8 xmm m64 +// VCVTPS2PH imm8 ymm m128 +// Construct and append a VCVTPS2PH instruction to the active function. +func (c *Context) VCVTPS2PH(i, xy, mx operand.Op) { + if inst, err := x86.VCVTPS2PH(i, xy, mx); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value. +// +// Forms: +// +// VCVTPS2PH imm8 xmm xmm +// VCVTPS2PH imm8 ymm xmm +// VCVTPS2PH imm8 xmm m64 +// VCVTPS2PH imm8 ymm m128 +// Construct and append a VCVTPS2PH instruction to the active function. +// Operates on the global context. +func VCVTPS2PH(i, xy, mx operand.Op) { ctx.VCVTPS2PH(i, xy, mx) } + +// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// VCVTSD2SI xmm r32 +// VCVTSD2SI m64 r32 +// Construct and append a VCVTSD2SI instruction to the active function. 
+func (c *Context) VCVTSD2SI(mx, r operand.Op) { + if inst, err := x86.VCVTSD2SI(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// VCVTSD2SI xmm r32 +// VCVTSD2SI m64 r32 +// Construct and append a VCVTSD2SI instruction to the active function. +// Operates on the global context. +func VCVTSD2SI(mx, r operand.Op) { ctx.VCVTSD2SI(mx, r) } + +// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// VCVTSD2SIQ xmm r64 +// VCVTSD2SIQ m64 r64 +// Construct and append a VCVTSD2SIQ instruction to the active function. +func (c *Context) VCVTSD2SIQ(mx, r operand.Op) { + if inst, err := x86.VCVTSD2SIQ(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// VCVTSD2SIQ xmm r64 +// VCVTSD2SIQ m64 r64 +// Construct and append a VCVTSD2SIQ instruction to the active function. +// Operates on the global context. +func VCVTSD2SIQ(mx, r operand.Op) { ctx.VCVTSD2SIQ(mx, r) } + +// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSD2SS xmm xmm xmm +// VCVTSD2SS m64 xmm xmm +// Construct and append a VCVTSD2SS instruction to the active function. +func (c *Context) VCVTSD2SS(mx, x, x1 operand.Op) { + if inst, err := x86.VCVTSD2SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSD2SS xmm xmm xmm +// VCVTSD2SS m64 xmm xmm +// Construct and append a VCVTSD2SS instruction to the active function. +// Operates on the global context. +func VCVTSD2SS(mx, x, x1 operand.Op) { ctx.VCVTSD2SS(mx, x, x1) } + +// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSI2SDL r32 xmm xmm +// VCVTSI2SDL m32 xmm xmm +// Construct and append a VCVTSI2SDL instruction to the active function. +func (c *Context) VCVTSI2SDL(mr, x, x1 operand.Op) { + if inst, err := x86.VCVTSI2SDL(mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSI2SDL r32 xmm xmm +// VCVTSI2SDL m32 xmm xmm +// Construct and append a VCVTSI2SDL instruction to the active function. +// Operates on the global context. +func VCVTSI2SDL(mr, x, x1 operand.Op) { ctx.VCVTSI2SDL(mr, x, x1) } + +// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSI2SDQ r64 xmm xmm +// VCVTSI2SDQ m64 xmm xmm +// Construct and append a VCVTSI2SDQ instruction to the active function. +func (c *Context) VCVTSI2SDQ(mr, x, x1 operand.Op) { + if inst, err := x86.VCVTSI2SDQ(mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSI2SDQ r64 xmm xmm +// VCVTSI2SDQ m64 xmm xmm +// Construct and append a VCVTSI2SDQ instruction to the active function. +// Operates on the global context. +func VCVTSI2SDQ(mr, x, x1 operand.Op) { ctx.VCVTSI2SDQ(mr, x, x1) } + +// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value. 
+// +// Forms: +// +// VCVTSI2SSL r32 xmm xmm +// VCVTSI2SSL m32 xmm xmm +// Construct and append a VCVTSI2SSL instruction to the active function. +func (c *Context) VCVTSI2SSL(mr, x, x1 operand.Op) { + if inst, err := x86.VCVTSI2SSL(mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSI2SSL r32 xmm xmm +// VCVTSI2SSL m32 xmm xmm +// Construct and append a VCVTSI2SSL instruction to the active function. +// Operates on the global context. +func VCVTSI2SSL(mr, x, x1 operand.Op) { ctx.VCVTSI2SSL(mr, x, x1) } + +// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSI2SSQ r64 xmm xmm +// VCVTSI2SSQ m64 xmm xmm +// Construct and append a VCVTSI2SSQ instruction to the active function. +func (c *Context) VCVTSI2SSQ(mr, x, x1 operand.Op) { + if inst, err := x86.VCVTSI2SSQ(mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSI2SSQ r64 xmm xmm +// VCVTSI2SSQ m64 xmm xmm +// Construct and append a VCVTSI2SSQ instruction to the active function. +// Operates on the global context. +func VCVTSI2SSQ(mr, x, x1 operand.Op) { ctx.VCVTSI2SSQ(mr, x, x1) } + +// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSS2SD xmm xmm xmm +// VCVTSS2SD m32 xmm xmm +// Construct and append a VCVTSS2SD instruction to the active function. +func (c *Context) VCVTSS2SD(mx, x, x1 operand.Op) { + if inst, err := x86.VCVTSS2SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSS2SD xmm xmm xmm +// VCVTSS2SD m32 xmm xmm +// Construct and append a VCVTSS2SD instruction to the active function. +// Operates on the global context. +func VCVTSS2SD(mx, x, x1 operand.Op) { ctx.VCVTSS2SD(mx, x, x1) } + +// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTSS2SI xmm r32 +// VCVTSS2SI m32 r32 +// Construct and append a VCVTSS2SI instruction to the active function. +func (c *Context) VCVTSS2SI(mx, r operand.Op) { + if inst, err := x86.VCVTSS2SI(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTSS2SI xmm r32 +// VCVTSS2SI m32 r32 +// Construct and append a VCVTSS2SI instruction to the active function. +// Operates on the global context. +func VCVTSS2SI(mx, r operand.Op) { ctx.VCVTSS2SI(mx, r) } + +// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTSS2SIQ xmm r64 +// VCVTSS2SIQ m32 r64 +// Construct and append a VCVTSS2SIQ instruction to the active function. +func (c *Context) VCVTSS2SIQ(mx, r operand.Op) { + if inst, err := x86.VCVTSS2SIQ(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTSS2SIQ xmm r64 +// VCVTSS2SIQ m32 r64 +// Construct and append a VCVTSS2SIQ instruction to the active function. +// Operates on the global context. 
+func VCVTSS2SIQ(mx, r operand.Op) { ctx.VCVTSS2SIQ(mx, r) } + +// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPD2DQX xmm xmm +// VCVTTPD2DQX m128 xmm +// Construct and append a VCVTTPD2DQX instruction to the active function. +func (c *Context) VCVTTPD2DQX(mx, x operand.Op) { + if inst, err := x86.VCVTTPD2DQX(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPD2DQX xmm xmm +// VCVTTPD2DQX m128 xmm +// Construct and append a VCVTTPD2DQX instruction to the active function. +// Operates on the global context. +func VCVTTPD2DQX(mx, x operand.Op) { ctx.VCVTTPD2DQX(mx, x) } + +// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPD2DQY ymm xmm +// VCVTTPD2DQY m256 xmm +// Construct and append a VCVTTPD2DQY instruction to the active function. +func (c *Context) VCVTTPD2DQY(my, x operand.Op) { + if inst, err := x86.VCVTTPD2DQY(my, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPD2DQY ymm xmm +// VCVTTPD2DQY m256 xmm +// Construct and append a VCVTTPD2DQY instruction to the active function. +// Operates on the global context. +func VCVTTPD2DQY(my, x operand.Op) { ctx.VCVTTPD2DQY(my, x) } + +// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPS2DQ xmm xmm +// VCVTTPS2DQ m128 xmm +// VCVTTPS2DQ ymm ymm +// VCVTTPS2DQ m256 ymm +// Construct and append a VCVTTPS2DQ instruction to the active function. +func (c *Context) VCVTTPS2DQ(mxy, xy operand.Op) { + if inst, err := x86.VCVTTPS2DQ(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPS2DQ xmm xmm +// VCVTTPS2DQ m128 xmm +// VCVTTPS2DQ ymm ymm +// VCVTTPS2DQ m256 ymm +// Construct and append a VCVTTPS2DQ instruction to the active function. +// Operates on the global context. +func VCVTTPS2DQ(mxy, xy operand.Op) { ctx.VCVTTPS2DQ(mxy, xy) } + +// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// VCVTTSD2SI xmm r32 +// VCVTTSD2SI m64 r32 +// Construct and append a VCVTTSD2SI instruction to the active function. +func (c *Context) VCVTTSD2SI(mx, r operand.Op) { + if inst, err := x86.VCVTTSD2SI(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// VCVTTSD2SI xmm r32 +// VCVTTSD2SI m64 r32 +// Construct and append a VCVTTSD2SI instruction to the active function. +// Operates on the global context. +func VCVTTSD2SI(mx, r operand.Op) { ctx.VCVTTSD2SI(mx, r) } + +// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// VCVTTSD2SIQ xmm r64 +// VCVTTSD2SIQ m64 r64 +// Construct and append a VCVTTSD2SIQ instruction to the active function. 
+func (c *Context) VCVTTSD2SIQ(mx, r operand.Op) { + if inst, err := x86.VCVTTSD2SIQ(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// VCVTTSD2SIQ xmm r64 +// VCVTTSD2SIQ m64 r64 +// Construct and append a VCVTTSD2SIQ instruction to the active function. +// Operates on the global context. +func VCVTTSD2SIQ(mx, r operand.Op) { ctx.VCVTTSD2SIQ(mx, r) } + +// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTTSS2SI xmm r32 +// VCVTTSS2SI m32 r32 +// Construct and append a VCVTTSS2SI instruction to the active function. +func (c *Context) VCVTTSS2SI(mx, r operand.Op) { + if inst, err := x86.VCVTTSS2SI(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTTSS2SI xmm r32 +// VCVTTSS2SI m32 r32 +// Construct and append a VCVTTSS2SI instruction to the active function. +// Operates on the global context. +func VCVTTSS2SI(mx, r operand.Op) { ctx.VCVTTSS2SI(mx, r) } + +// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTTSS2SIQ xmm r64 +// VCVTTSS2SIQ m32 r64 +// Construct and append a VCVTTSS2SIQ instruction to the active function. +func (c *Context) VCVTTSS2SIQ(mx, r operand.Op) { + if inst, err := x86.VCVTTSS2SIQ(mx, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTTSS2SIQ xmm r64 +// VCVTTSS2SIQ m32 r64 +// Construct and append a VCVTTSS2SIQ instruction to the active function. +// Operates on the global context. +func VCVTTSS2SIQ(mx, r operand.Op) { ctx.VCVTTSS2SIQ(mx, r) } + +// VDIVPD: Divide Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VDIVPD xmm xmm xmm +// VDIVPD m128 xmm xmm +// VDIVPD ymm ymm ymm +// VDIVPD m256 ymm ymm +// Construct and append a VDIVPD instruction to the active function. +func (c *Context) VDIVPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VDIVPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VDIVPD: Divide Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VDIVPD xmm xmm xmm +// VDIVPD m128 xmm xmm +// VDIVPD ymm ymm ymm +// VDIVPD m256 ymm ymm +// Construct and append a VDIVPD instruction to the active function. +// Operates on the global context. +func VDIVPD(mxy, xy, xy1 operand.Op) { ctx.VDIVPD(mxy, xy, xy1) } + +// VDIVPS: Divide Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VDIVPS xmm xmm xmm +// VDIVPS m128 xmm xmm +// VDIVPS ymm ymm ymm +// VDIVPS m256 ymm ymm +// Construct and append a VDIVPS instruction to the active function. +func (c *Context) VDIVPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VDIVPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VDIVPS: Divide Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VDIVPS xmm xmm xmm +// VDIVPS m128 xmm xmm +// VDIVPS ymm ymm ymm +// VDIVPS m256 ymm ymm +// Construct and append a VDIVPS instruction to the active function. +// Operates on the global context. 
+func VDIVPS(mxy, xy, xy1 operand.Op) { ctx.VDIVPS(mxy, xy, xy1) } + +// VDIVSD: Divide Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VDIVSD xmm xmm xmm +// VDIVSD m64 xmm xmm +// Construct and append a VDIVSD instruction to the active function. +func (c *Context) VDIVSD(mx, x, x1 operand.Op) { + if inst, err := x86.VDIVSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VDIVSD: Divide Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VDIVSD xmm xmm xmm +// VDIVSD m64 xmm xmm +// Construct and append a VDIVSD instruction to the active function. +// Operates on the global context. +func VDIVSD(mx, x, x1 operand.Op) { ctx.VDIVSD(mx, x, x1) } + +// VDIVSS: Divide Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VDIVSS xmm xmm xmm +// VDIVSS m32 xmm xmm +// Construct and append a VDIVSS instruction to the active function. +func (c *Context) VDIVSS(mx, x, x1 operand.Op) { + if inst, err := x86.VDIVSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VDIVSS: Divide Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VDIVSS xmm xmm xmm +// VDIVSS m32 xmm xmm +// Construct and append a VDIVSS instruction to the active function. +// Operates on the global context. +func VDIVSS(mx, x, x1 operand.Op) { ctx.VDIVSS(mx, x, x1) } + +// VDPPD: Dot Product of Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VDPPD imm8 xmm xmm xmm +// VDPPD imm8 m128 xmm xmm +// Construct and append a VDPPD instruction to the active function. +func (c *Context) VDPPD(i, mx, x, x1 operand.Op) { + if inst, err := x86.VDPPD(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VDPPD: Dot Product of Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VDPPD imm8 xmm xmm xmm +// VDPPD imm8 m128 xmm xmm +// Construct and append a VDPPD instruction to the active function. +// Operates on the global context. +func VDPPD(i, mx, x, x1 operand.Op) { ctx.VDPPD(i, mx, x, x1) } + +// VDPPS: Dot Product of Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VDPPS imm8 xmm xmm xmm +// VDPPS imm8 m128 xmm xmm +// VDPPS imm8 ymm ymm ymm +// VDPPS imm8 m256 ymm ymm +// Construct and append a VDPPS instruction to the active function. +func (c *Context) VDPPS(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VDPPS(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VDPPS: Dot Product of Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VDPPS imm8 xmm xmm xmm +// VDPPS imm8 m128 xmm xmm +// VDPPS imm8 ymm ymm ymm +// VDPPS imm8 m256 ymm ymm +// Construct and append a VDPPS instruction to the active function. +// Operates on the global context. +func VDPPS(i, mxy, xy, xy1 operand.Op) { ctx.VDPPS(i, mxy, xy, xy1) } + +// VEXTRACTF128: Extract Packed Floating-Point Values. +// +// Forms: +// +// VEXTRACTF128 imm8 ymm xmm +// VEXTRACTF128 imm8 ymm m128 +// Construct and append a VEXTRACTF128 instruction to the active function. +func (c *Context) VEXTRACTF128(i, y, mx operand.Op) { + if inst, err := x86.VEXTRACTF128(i, y, mx); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VEXTRACTF128: Extract Packed Floating-Point Values. +// +// Forms: +// +// VEXTRACTF128 imm8 ymm xmm +// VEXTRACTF128 imm8 ymm m128 +// Construct and append a VEXTRACTF128 instruction to the active function. 
+// Operates on the global context. +func VEXTRACTF128(i, y, mx operand.Op) { ctx.VEXTRACTF128(i, y, mx) } + +// VEXTRACTI128: Extract Packed Integer Values. +// +// Forms: +// +// VEXTRACTI128 imm8 ymm xmm +// VEXTRACTI128 imm8 ymm m128 +// Construct and append a VEXTRACTI128 instruction to the active function. +func (c *Context) VEXTRACTI128(i, y, mx operand.Op) { + if inst, err := x86.VEXTRACTI128(i, y, mx); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VEXTRACTI128: Extract Packed Integer Values. +// +// Forms: +// +// VEXTRACTI128 imm8 ymm xmm +// VEXTRACTI128 imm8 ymm m128 +// Construct and append a VEXTRACTI128 instruction to the active function. +// Operates on the global context. +func VEXTRACTI128(i, y, mx operand.Op) { ctx.VEXTRACTI128(i, y, mx) } + +// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value. +// +// Forms: +// +// VEXTRACTPS imm8 xmm r32 +// VEXTRACTPS imm8 xmm m32 +// Construct and append a VEXTRACTPS instruction to the active function. +func (c *Context) VEXTRACTPS(i, x, mr operand.Op) { + if inst, err := x86.VEXTRACTPS(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value. +// +// Forms: +// +// VEXTRACTPS imm8 xmm r32 +// VEXTRACTPS imm8 xmm m32 +// Construct and append a VEXTRACTPS instruction to the active function. +// Operates on the global context. +func VEXTRACTPS(i, x, mr operand.Op) { ctx.VEXTRACTPS(i, x, mr) } + +// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132PD xmm xmm xmm +// VFMADD132PD m128 xmm xmm +// VFMADD132PD ymm ymm ymm +// VFMADD132PD m256 ymm ymm +// Construct and append a VFMADD132PD instruction to the active function. +func (c *Context) VFMADD132PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADD132PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132PD xmm xmm xmm +// VFMADD132PD m128 xmm xmm +// VFMADD132PD ymm ymm ymm +// VFMADD132PD m256 ymm ymm +// Construct and append a VFMADD132PD instruction to the active function. +// Operates on the global context. +func VFMADD132PD(mxy, xy, xy1 operand.Op) { ctx.VFMADD132PD(mxy, xy, xy1) } + +// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132PS xmm xmm xmm +// VFMADD132PS m128 xmm xmm +// VFMADD132PS ymm ymm ymm +// VFMADD132PS m256 ymm ymm +// Construct and append a VFMADD132PS instruction to the active function. +func (c *Context) VFMADD132PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADD132PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132PS xmm xmm xmm +// VFMADD132PS m128 xmm xmm +// VFMADD132PS ymm ymm ymm +// VFMADD132PS m256 ymm ymm +// Construct and append a VFMADD132PS instruction to the active function. +// Operates on the global context. +func VFMADD132PS(mxy, xy, xy1 operand.Op) { ctx.VFMADD132PS(mxy, xy, xy1) } + +// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132SD xmm xmm xmm +// VFMADD132SD m64 xmm xmm +// Construct and append a VFMADD132SD instruction to the active function. 
+func (c *Context) VFMADD132SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFMADD132SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132SD xmm xmm xmm +// VFMADD132SD m64 xmm xmm +// Construct and append a VFMADD132SD instruction to the active function. +// Operates on the global context. +func VFMADD132SD(mx, x, x1 operand.Op) { ctx.VFMADD132SD(mx, x, x1) } + +// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132SS xmm xmm xmm +// VFMADD132SS m32 xmm xmm +// Construct and append a VFMADD132SS instruction to the active function. +func (c *Context) VFMADD132SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFMADD132SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132SS xmm xmm xmm +// VFMADD132SS m32 xmm xmm +// Construct and append a VFMADD132SS instruction to the active function. +// Operates on the global context. +func VFMADD132SS(mx, x, x1 operand.Op) { ctx.VFMADD132SS(mx, x, x1) } + +// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213PD xmm xmm xmm +// VFMADD213PD m128 xmm xmm +// VFMADD213PD ymm ymm ymm +// VFMADD213PD m256 ymm ymm +// Construct and append a VFMADD213PD instruction to the active function. +func (c *Context) VFMADD213PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADD213PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213PD xmm xmm xmm +// VFMADD213PD m128 xmm xmm +// VFMADD213PD ymm ymm ymm +// VFMADD213PD m256 ymm ymm +// Construct and append a VFMADD213PD instruction to the active function. +// Operates on the global context. +func VFMADD213PD(mxy, xy, xy1 operand.Op) { ctx.VFMADD213PD(mxy, xy, xy1) } + +// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213PS xmm xmm xmm +// VFMADD213PS m128 xmm xmm +// VFMADD213PS ymm ymm ymm +// VFMADD213PS m256 ymm ymm +// Construct and append a VFMADD213PS instruction to the active function. +func (c *Context) VFMADD213PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADD213PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213PS xmm xmm xmm +// VFMADD213PS m128 xmm xmm +// VFMADD213PS ymm ymm ymm +// VFMADD213PS m256 ymm ymm +// Construct and append a VFMADD213PS instruction to the active function. +// Operates on the global context. +func VFMADD213PS(mxy, xy, xy1 operand.Op) { ctx.VFMADD213PS(mxy, xy, xy1) } + +// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213SD xmm xmm xmm +// VFMADD213SD m64 xmm xmm +// Construct and append a VFMADD213SD instruction to the active function. +func (c *Context) VFMADD213SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFMADD213SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADD213SD xmm xmm xmm +// VFMADD213SD m64 xmm xmm +// Construct and append a VFMADD213SD instruction to the active function. +// Operates on the global context. +func VFMADD213SD(mx, x, x1 operand.Op) { ctx.VFMADD213SD(mx, x, x1) } + +// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213SS xmm xmm xmm +// VFMADD213SS m32 xmm xmm +// Construct and append a VFMADD213SS instruction to the active function. +func (c *Context) VFMADD213SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFMADD213SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213SS xmm xmm xmm +// VFMADD213SS m32 xmm xmm +// Construct and append a VFMADD213SS instruction to the active function. +// Operates on the global context. +func VFMADD213SS(mx, x, x1 operand.Op) { ctx.VFMADD213SS(mx, x, x1) } + +// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231PD xmm xmm xmm +// VFMADD231PD m128 xmm xmm +// VFMADD231PD ymm ymm ymm +// VFMADD231PD m256 ymm ymm +// Construct and append a VFMADD231PD instruction to the active function. +func (c *Context) VFMADD231PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADD231PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231PD xmm xmm xmm +// VFMADD231PD m128 xmm xmm +// VFMADD231PD ymm ymm ymm +// VFMADD231PD m256 ymm ymm +// Construct and append a VFMADD231PD instruction to the active function. +// Operates on the global context. +func VFMADD231PD(mxy, xy, xy1 operand.Op) { ctx.VFMADD231PD(mxy, xy, xy1) } + +// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231PS xmm xmm xmm +// VFMADD231PS m128 xmm xmm +// VFMADD231PS ymm ymm ymm +// VFMADD231PS m256 ymm ymm +// Construct and append a VFMADD231PS instruction to the active function. +func (c *Context) VFMADD231PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADD231PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231PS xmm xmm xmm +// VFMADD231PS m128 xmm xmm +// VFMADD231PS ymm ymm ymm +// VFMADD231PS m256 ymm ymm +// Construct and append a VFMADD231PS instruction to the active function. +// Operates on the global context. +func VFMADD231PS(mxy, xy, xy1 operand.Op) { ctx.VFMADD231PS(mxy, xy, xy1) } + +// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231SD xmm xmm xmm +// VFMADD231SD m64 xmm xmm +// Construct and append a VFMADD231SD instruction to the active function. +func (c *Context) VFMADD231SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFMADD231SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231SD xmm xmm xmm +// VFMADD231SD m64 xmm xmm +// Construct and append a VFMADD231SD instruction to the active function. +// Operates on the global context. 
+func VFMADD231SD(mx, x, x1 operand.Op) { ctx.VFMADD231SD(mx, x, x1) } + +// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231SS xmm xmm xmm +// VFMADD231SS m32 xmm xmm +// Construct and append a VFMADD231SS instruction to the active function. +func (c *Context) VFMADD231SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFMADD231SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231SS xmm xmm xmm +// VFMADD231SS m32 xmm xmm +// Construct and append a VFMADD231SS instruction to the active function. +// Operates on the global context. +func VFMADD231SS(mx, x, x1 operand.Op) { ctx.VFMADD231SS(mx, x, x1) } + +// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB132PD xmm xmm xmm +// VFMADDSUB132PD m128 xmm xmm +// VFMADDSUB132PD ymm ymm ymm +// VFMADDSUB132PD m256 ymm ymm +// Construct and append a VFMADDSUB132PD instruction to the active function. +func (c *Context) VFMADDSUB132PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADDSUB132PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB132PD xmm xmm xmm +// VFMADDSUB132PD m128 xmm xmm +// VFMADDSUB132PD ymm ymm ymm +// VFMADDSUB132PD m256 ymm ymm +// Construct and append a VFMADDSUB132PD instruction to the active function. +// Operates on the global context. +func VFMADDSUB132PD(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB132PD(mxy, xy, xy1) } + +// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB132PS xmm xmm xmm +// VFMADDSUB132PS m128 xmm xmm +// VFMADDSUB132PS ymm ymm ymm +// VFMADDSUB132PS m256 ymm ymm +// Construct and append a VFMADDSUB132PS instruction to the active function. +func (c *Context) VFMADDSUB132PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADDSUB132PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB132PS xmm xmm xmm +// VFMADDSUB132PS m128 xmm xmm +// VFMADDSUB132PS ymm ymm ymm +// VFMADDSUB132PS m256 ymm ymm +// Construct and append a VFMADDSUB132PS instruction to the active function. +// Operates on the global context. +func VFMADDSUB132PS(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB132PS(mxy, xy, xy1) } + +// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB213PD xmm xmm xmm +// VFMADDSUB213PD m128 xmm xmm +// VFMADDSUB213PD ymm ymm ymm +// VFMADDSUB213PD m256 ymm ymm +// Construct and append a VFMADDSUB213PD instruction to the active function. +func (c *Context) VFMADDSUB213PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADDSUB213PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADDSUB213PD xmm xmm xmm +// VFMADDSUB213PD m128 xmm xmm +// VFMADDSUB213PD ymm ymm ymm +// VFMADDSUB213PD m256 ymm ymm +// Construct and append a VFMADDSUB213PD instruction to the active function. +// Operates on the global context. +func VFMADDSUB213PD(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB213PD(mxy, xy, xy1) } + +// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB213PS xmm xmm xmm +// VFMADDSUB213PS m128 xmm xmm +// VFMADDSUB213PS ymm ymm ymm +// VFMADDSUB213PS m256 ymm ymm +// Construct and append a VFMADDSUB213PS instruction to the active function. +func (c *Context) VFMADDSUB213PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADDSUB213PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB213PS xmm xmm xmm +// VFMADDSUB213PS m128 xmm xmm +// VFMADDSUB213PS ymm ymm ymm +// VFMADDSUB213PS m256 ymm ymm +// Construct and append a VFMADDSUB213PS instruction to the active function. +// Operates on the global context. +func VFMADDSUB213PS(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB213PS(mxy, xy, xy1) } + +// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB231PD xmm xmm xmm +// VFMADDSUB231PD m128 xmm xmm +// VFMADDSUB231PD ymm ymm ymm +// VFMADDSUB231PD m256 ymm ymm +// Construct and append a VFMADDSUB231PD instruction to the active function. +func (c *Context) VFMADDSUB231PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADDSUB231PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB231PD xmm xmm xmm +// VFMADDSUB231PD m128 xmm xmm +// VFMADDSUB231PD ymm ymm ymm +// VFMADDSUB231PD m256 ymm ymm +// Construct and append a VFMADDSUB231PD instruction to the active function. +// Operates on the global context. +func VFMADDSUB231PD(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB231PD(mxy, xy, xy1) } + +// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB231PS xmm xmm xmm +// VFMADDSUB231PS m128 xmm xmm +// VFMADDSUB231PS ymm ymm ymm +// VFMADDSUB231PS m256 ymm ymm +// Construct and append a VFMADDSUB231PS instruction to the active function. +func (c *Context) VFMADDSUB231PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMADDSUB231PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB231PS xmm xmm xmm +// VFMADDSUB231PS m128 xmm xmm +// VFMADDSUB231PS ymm ymm ymm +// VFMADDSUB231PS m256 ymm ymm +// Construct and append a VFMADDSUB231PS instruction to the active function. +// Operates on the global context. +func VFMADDSUB231PS(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB231PS(mxy, xy, xy1) } + +// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUB132PD xmm xmm xmm +// VFMSUB132PD m128 xmm xmm +// VFMSUB132PD ymm ymm ymm +// VFMSUB132PD m256 ymm ymm +// Construct and append a VFMSUB132PD instruction to the active function. +func (c *Context) VFMSUB132PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUB132PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132PD xmm xmm xmm +// VFMSUB132PD m128 xmm xmm +// VFMSUB132PD ymm ymm ymm +// VFMSUB132PD m256 ymm ymm +// Construct and append a VFMSUB132PD instruction to the active function. +// Operates on the global context. +func VFMSUB132PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUB132PD(mxy, xy, xy1) } + +// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132PS xmm xmm xmm +// VFMSUB132PS m128 xmm xmm +// VFMSUB132PS ymm ymm ymm +// VFMSUB132PS m256 ymm ymm +// Construct and append a VFMSUB132PS instruction to the active function. +func (c *Context) VFMSUB132PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUB132PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132PS xmm xmm xmm +// VFMSUB132PS m128 xmm xmm +// VFMSUB132PS ymm ymm ymm +// VFMSUB132PS m256 ymm ymm +// Construct and append a VFMSUB132PS instruction to the active function. +// Operates on the global context. +func VFMSUB132PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUB132PS(mxy, xy, xy1) } + +// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132SD xmm xmm xmm +// VFMSUB132SD m64 xmm xmm +// Construct and append a VFMSUB132SD instruction to the active function. +func (c *Context) VFMSUB132SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFMSUB132SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132SD xmm xmm xmm +// VFMSUB132SD m64 xmm xmm +// Construct and append a VFMSUB132SD instruction to the active function. +// Operates on the global context. +func VFMSUB132SD(mx, x, x1 operand.Op) { ctx.VFMSUB132SD(mx, x, x1) } + +// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132SS xmm xmm xmm +// VFMSUB132SS m32 xmm xmm +// Construct and append a VFMSUB132SS instruction to the active function. +func (c *Context) VFMSUB132SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFMSUB132SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132SS xmm xmm xmm +// VFMSUB132SS m32 xmm xmm +// Construct and append a VFMSUB132SS instruction to the active function. +// Operates on the global context. +func VFMSUB132SS(mx, x, x1 operand.Op) { ctx.VFMSUB132SS(mx, x, x1) } + +// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213PD xmm xmm xmm +// VFMSUB213PD m128 xmm xmm +// VFMSUB213PD ymm ymm ymm +// VFMSUB213PD m256 ymm ymm +// Construct and append a VFMSUB213PD instruction to the active function. 
+func (c *Context) VFMSUB213PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUB213PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213PD xmm xmm xmm +// VFMSUB213PD m128 xmm xmm +// VFMSUB213PD ymm ymm ymm +// VFMSUB213PD m256 ymm ymm +// Construct and append a VFMSUB213PD instruction to the active function. +// Operates on the global context. +func VFMSUB213PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUB213PD(mxy, xy, xy1) } + +// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213PS xmm xmm xmm +// VFMSUB213PS m128 xmm xmm +// VFMSUB213PS ymm ymm ymm +// VFMSUB213PS m256 ymm ymm +// Construct and append a VFMSUB213PS instruction to the active function. +func (c *Context) VFMSUB213PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUB213PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213PS xmm xmm xmm +// VFMSUB213PS m128 xmm xmm +// VFMSUB213PS ymm ymm ymm +// VFMSUB213PS m256 ymm ymm +// Construct and append a VFMSUB213PS instruction to the active function. +// Operates on the global context. +func VFMSUB213PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUB213PS(mxy, xy, xy1) } + +// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213SD xmm xmm xmm +// VFMSUB213SD m64 xmm xmm +// Construct and append a VFMSUB213SD instruction to the active function. +func (c *Context) VFMSUB213SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFMSUB213SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213SD xmm xmm xmm +// VFMSUB213SD m64 xmm xmm +// Construct and append a VFMSUB213SD instruction to the active function. +// Operates on the global context. +func VFMSUB213SD(mx, x, x1 operand.Op) { ctx.VFMSUB213SD(mx, x, x1) } + +// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213SS xmm xmm xmm +// VFMSUB213SS m32 xmm xmm +// Construct and append a VFMSUB213SS instruction to the active function. +func (c *Context) VFMSUB213SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFMSUB213SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213SS xmm xmm xmm +// VFMSUB213SS m32 xmm xmm +// Construct and append a VFMSUB213SS instruction to the active function. +// Operates on the global context. +func VFMSUB213SS(mx, x, x1 operand.Op) { ctx.VFMSUB213SS(mx, x, x1) } + +// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231PD xmm xmm xmm +// VFMSUB231PD m128 xmm xmm +// VFMSUB231PD ymm ymm ymm +// VFMSUB231PD m256 ymm ymm +// Construct and append a VFMSUB231PD instruction to the active function. 
+func (c *Context) VFMSUB231PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUB231PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231PD xmm xmm xmm +// VFMSUB231PD m128 xmm xmm +// VFMSUB231PD ymm ymm ymm +// VFMSUB231PD m256 ymm ymm +// Construct and append a VFMSUB231PD instruction to the active function. +// Operates on the global context. +func VFMSUB231PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUB231PD(mxy, xy, xy1) } + +// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231PS xmm xmm xmm +// VFMSUB231PS m128 xmm xmm +// VFMSUB231PS ymm ymm ymm +// VFMSUB231PS m256 ymm ymm +// Construct and append a VFMSUB231PS instruction to the active function. +func (c *Context) VFMSUB231PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUB231PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231PS xmm xmm xmm +// VFMSUB231PS m128 xmm xmm +// VFMSUB231PS ymm ymm ymm +// VFMSUB231PS m256 ymm ymm +// Construct and append a VFMSUB231PS instruction to the active function. +// Operates on the global context. +func VFMSUB231PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUB231PS(mxy, xy, xy1) } + +// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231SD xmm xmm xmm +// VFMSUB231SD m64 xmm xmm +// Construct and append a VFMSUB231SD instruction to the active function. +func (c *Context) VFMSUB231SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFMSUB231SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231SD xmm xmm xmm +// VFMSUB231SD m64 xmm xmm +// Construct and append a VFMSUB231SD instruction to the active function. +// Operates on the global context. +func VFMSUB231SD(mx, x, x1 operand.Op) { ctx.VFMSUB231SD(mx, x, x1) } + +// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231SS xmm xmm xmm +// VFMSUB231SS m32 xmm xmm +// Construct and append a VFMSUB231SS instruction to the active function. +func (c *Context) VFMSUB231SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFMSUB231SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231SS xmm xmm xmm +// VFMSUB231SS m32 xmm xmm +// Construct and append a VFMSUB231SS instruction to the active function. +// Operates on the global context. +func VFMSUB231SS(mx, x, x1 operand.Op) { ctx.VFMSUB231SS(mx, x, x1) } + +// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD132PD xmm xmm xmm +// VFMSUBADD132PD m128 xmm xmm +// VFMSUBADD132PD ymm ymm ymm +// VFMSUBADD132PD m256 ymm ymm +// Construct and append a VFMSUBADD132PD instruction to the active function. 
+func (c *Context) VFMSUBADD132PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUBADD132PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD132PD xmm xmm xmm +// VFMSUBADD132PD m128 xmm xmm +// VFMSUBADD132PD ymm ymm ymm +// VFMSUBADD132PD m256 ymm ymm +// Construct and append a VFMSUBADD132PD instruction to the active function. +// Operates on the global context. +func VFMSUBADD132PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD132PD(mxy, xy, xy1) } + +// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD132PS xmm xmm xmm +// VFMSUBADD132PS m128 xmm xmm +// VFMSUBADD132PS ymm ymm ymm +// VFMSUBADD132PS m256 ymm ymm +// Construct and append a VFMSUBADD132PS instruction to the active function. +func (c *Context) VFMSUBADD132PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUBADD132PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD132PS xmm xmm xmm +// VFMSUBADD132PS m128 xmm xmm +// VFMSUBADD132PS ymm ymm ymm +// VFMSUBADD132PS m256 ymm ymm +// Construct and append a VFMSUBADD132PS instruction to the active function. +// Operates on the global context. +func VFMSUBADD132PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD132PS(mxy, xy, xy1) } + +// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD213PD xmm xmm xmm +// VFMSUBADD213PD m128 xmm xmm +// VFMSUBADD213PD ymm ymm ymm +// VFMSUBADD213PD m256 ymm ymm +// Construct and append a VFMSUBADD213PD instruction to the active function. +func (c *Context) VFMSUBADD213PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUBADD213PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD213PD xmm xmm xmm +// VFMSUBADD213PD m128 xmm xmm +// VFMSUBADD213PD ymm ymm ymm +// VFMSUBADD213PD m256 ymm ymm +// Construct and append a VFMSUBADD213PD instruction to the active function. +// Operates on the global context. +func VFMSUBADD213PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD213PD(mxy, xy, xy1) } + +// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD213PS xmm xmm xmm +// VFMSUBADD213PS m128 xmm xmm +// VFMSUBADD213PS ymm ymm ymm +// VFMSUBADD213PS m256 ymm ymm +// Construct and append a VFMSUBADD213PS instruction to the active function. +func (c *Context) VFMSUBADD213PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUBADD213PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD213PS xmm xmm xmm +// VFMSUBADD213PS m128 xmm xmm +// VFMSUBADD213PS ymm ymm ymm +// VFMSUBADD213PS m256 ymm ymm +// Construct and append a VFMSUBADD213PS instruction to the active function. +// Operates on the global context. 
+func VFMSUBADD213PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD213PS(mxy, xy, xy1) } + +// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD231PD xmm xmm xmm +// VFMSUBADD231PD m128 xmm xmm +// VFMSUBADD231PD ymm ymm ymm +// VFMSUBADD231PD m256 ymm ymm +// Construct and append a VFMSUBADD231PD instruction to the active function. +func (c *Context) VFMSUBADD231PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUBADD231PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD231PD xmm xmm xmm +// VFMSUBADD231PD m128 xmm xmm +// VFMSUBADD231PD ymm ymm ymm +// VFMSUBADD231PD m256 ymm ymm +// Construct and append a VFMSUBADD231PD instruction to the active function. +// Operates on the global context. +func VFMSUBADD231PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD231PD(mxy, xy, xy1) } + +// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD231PS xmm xmm xmm +// VFMSUBADD231PS m128 xmm xmm +// VFMSUBADD231PS ymm ymm ymm +// VFMSUBADD231PS m256 ymm ymm +// Construct and append a VFMSUBADD231PS instruction to the active function. +func (c *Context) VFMSUBADD231PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFMSUBADD231PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD231PS xmm xmm xmm +// VFMSUBADD231PS m128 xmm xmm +// VFMSUBADD231PS ymm ymm ymm +// VFMSUBADD231PS m256 ymm ymm +// Construct and append a VFMSUBADD231PS instruction to the active function. +// Operates on the global context. +func VFMSUBADD231PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD231PS(mxy, xy, xy1) } + +// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132PD xmm xmm xmm +// VFNMADD132PD m128 xmm xmm +// VFNMADD132PD ymm ymm ymm +// VFNMADD132PD m256 ymm ymm +// Construct and append a VFNMADD132PD instruction to the active function. +func (c *Context) VFNMADD132PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMADD132PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132PD xmm xmm xmm +// VFNMADD132PD m128 xmm xmm +// VFNMADD132PD ymm ymm ymm +// VFNMADD132PD m256 ymm ymm +// Construct and append a VFNMADD132PD instruction to the active function. +// Operates on the global context. +func VFNMADD132PD(mxy, xy, xy1 operand.Op) { ctx.VFNMADD132PD(mxy, xy, xy1) } + +// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132PS xmm xmm xmm +// VFNMADD132PS m128 xmm xmm +// VFNMADD132PS ymm ymm ymm +// VFNMADD132PS m256 ymm ymm +// Construct and append a VFNMADD132PS instruction to the active function. +func (c *Context) VFNMADD132PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMADD132PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMADD132PS xmm xmm xmm +// VFNMADD132PS m128 xmm xmm +// VFNMADD132PS ymm ymm ymm +// VFNMADD132PS m256 ymm ymm +// Construct and append a VFNMADD132PS instruction to the active function. +// Operates on the global context. +func VFNMADD132PS(mxy, xy, xy1 operand.Op) { ctx.VFNMADD132PS(mxy, xy, xy1) } + +// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132SD xmm xmm xmm +// VFNMADD132SD m64 xmm xmm +// Construct and append a VFNMADD132SD instruction to the active function. +func (c *Context) VFNMADD132SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMADD132SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132SD xmm xmm xmm +// VFNMADD132SD m64 xmm xmm +// Construct and append a VFNMADD132SD instruction to the active function. +// Operates on the global context. +func VFNMADD132SD(mx, x, x1 operand.Op) { ctx.VFNMADD132SD(mx, x, x1) } + +// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132SS xmm xmm xmm +// VFNMADD132SS m32 xmm xmm +// Construct and append a VFNMADD132SS instruction to the active function. +func (c *Context) VFNMADD132SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMADD132SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132SS xmm xmm xmm +// VFNMADD132SS m32 xmm xmm +// Construct and append a VFNMADD132SS instruction to the active function. +// Operates on the global context. +func VFNMADD132SS(mx, x, x1 operand.Op) { ctx.VFNMADD132SS(mx, x, x1) } + +// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213PD xmm xmm xmm +// VFNMADD213PD m128 xmm xmm +// VFNMADD213PD ymm ymm ymm +// VFNMADD213PD m256 ymm ymm +// Construct and append a VFNMADD213PD instruction to the active function. +func (c *Context) VFNMADD213PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMADD213PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213PD xmm xmm xmm +// VFNMADD213PD m128 xmm xmm +// VFNMADD213PD ymm ymm ymm +// VFNMADD213PD m256 ymm ymm +// Construct and append a VFNMADD213PD instruction to the active function. +// Operates on the global context. +func VFNMADD213PD(mxy, xy, xy1 operand.Op) { ctx.VFNMADD213PD(mxy, xy, xy1) } + +// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213PS xmm xmm xmm +// VFNMADD213PS m128 xmm xmm +// VFNMADD213PS ymm ymm ymm +// VFNMADD213PS m256 ymm ymm +// Construct and append a VFNMADD213PS instruction to the active function. +func (c *Context) VFNMADD213PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMADD213PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMADD213PS xmm xmm xmm +// VFNMADD213PS m128 xmm xmm +// VFNMADD213PS ymm ymm ymm +// VFNMADD213PS m256 ymm ymm +// Construct and append a VFNMADD213PS instruction to the active function. +// Operates on the global context. +func VFNMADD213PS(mxy, xy, xy1 operand.Op) { ctx.VFNMADD213PS(mxy, xy, xy1) } + +// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213SD xmm xmm xmm +// VFNMADD213SD m64 xmm xmm +// Construct and append a VFNMADD213SD instruction to the active function. +func (c *Context) VFNMADD213SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMADD213SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213SD xmm xmm xmm +// VFNMADD213SD m64 xmm xmm +// Construct and append a VFNMADD213SD instruction to the active function. +// Operates on the global context. +func VFNMADD213SD(mx, x, x1 operand.Op) { ctx.VFNMADD213SD(mx, x, x1) } + +// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213SS xmm xmm xmm +// VFNMADD213SS m32 xmm xmm +// Construct and append a VFNMADD213SS instruction to the active function. +func (c *Context) VFNMADD213SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMADD213SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213SS xmm xmm xmm +// VFNMADD213SS m32 xmm xmm +// Construct and append a VFNMADD213SS instruction to the active function. +// Operates on the global context. +func VFNMADD213SS(mx, x, x1 operand.Op) { ctx.VFNMADD213SS(mx, x, x1) } + +// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231PD xmm xmm xmm +// VFNMADD231PD m128 xmm xmm +// VFNMADD231PD ymm ymm ymm +// VFNMADD231PD m256 ymm ymm +// Construct and append a VFNMADD231PD instruction to the active function. +func (c *Context) VFNMADD231PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMADD231PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231PD xmm xmm xmm +// VFNMADD231PD m128 xmm xmm +// VFNMADD231PD ymm ymm ymm +// VFNMADD231PD m256 ymm ymm +// Construct and append a VFNMADD231PD instruction to the active function. +// Operates on the global context. +func VFNMADD231PD(mxy, xy, xy1 operand.Op) { ctx.VFNMADD231PD(mxy, xy, xy1) } + +// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231PS xmm xmm xmm +// VFNMADD231PS m128 xmm xmm +// VFNMADD231PS ymm ymm ymm +// VFNMADD231PS m256 ymm ymm +// Construct and append a VFNMADD231PS instruction to the active function. +func (c *Context) VFNMADD231PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMADD231PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMADD231PS xmm xmm xmm +// VFNMADD231PS m128 xmm xmm +// VFNMADD231PS ymm ymm ymm +// VFNMADD231PS m256 ymm ymm +// Construct and append a VFNMADD231PS instruction to the active function. +// Operates on the global context. +func VFNMADD231PS(mxy, xy, xy1 operand.Op) { ctx.VFNMADD231PS(mxy, xy, xy1) } + +// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231SD xmm xmm xmm +// VFNMADD231SD m64 xmm xmm +// Construct and append a VFNMADD231SD instruction to the active function. +func (c *Context) VFNMADD231SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMADD231SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231SD xmm xmm xmm +// VFNMADD231SD m64 xmm xmm +// Construct and append a VFNMADD231SD instruction to the active function. +// Operates on the global context. +func VFNMADD231SD(mx, x, x1 operand.Op) { ctx.VFNMADD231SD(mx, x, x1) } + +// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231SS xmm xmm xmm +// VFNMADD231SS m32 xmm xmm +// Construct and append a VFNMADD231SS instruction to the active function. +func (c *Context) VFNMADD231SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMADD231SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231SS xmm xmm xmm +// VFNMADD231SS m32 xmm xmm +// Construct and append a VFNMADD231SS instruction to the active function. +// Operates on the global context. +func VFNMADD231SS(mx, x, x1 operand.Op) { ctx.VFNMADD231SS(mx, x, x1) } + +// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132PD xmm xmm xmm +// VFNMSUB132PD m128 xmm xmm +// VFNMSUB132PD ymm ymm ymm +// VFNMSUB132PD m256 ymm ymm +// Construct and append a VFNMSUB132PD instruction to the active function. +func (c *Context) VFNMSUB132PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMSUB132PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132PD xmm xmm xmm +// VFNMSUB132PD m128 xmm xmm +// VFNMSUB132PD ymm ymm ymm +// VFNMSUB132PD m256 ymm ymm +// Construct and append a VFNMSUB132PD instruction to the active function. +// Operates on the global context. +func VFNMSUB132PD(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB132PD(mxy, xy, xy1) } + +// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132PS xmm xmm xmm +// VFNMSUB132PS m128 xmm xmm +// VFNMSUB132PS ymm ymm ymm +// VFNMSUB132PS m256 ymm ymm +// Construct and append a VFNMSUB132PS instruction to the active function. +func (c *Context) VFNMSUB132PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMSUB132PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB132PS xmm xmm xmm +// VFNMSUB132PS m128 xmm xmm +// VFNMSUB132PS ymm ymm ymm +// VFNMSUB132PS m256 ymm ymm +// Construct and append a VFNMSUB132PS instruction to the active function. +// Operates on the global context. +func VFNMSUB132PS(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB132PS(mxy, xy, xy1) } + +// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132SD xmm xmm xmm +// VFNMSUB132SD m64 xmm xmm +// Construct and append a VFNMSUB132SD instruction to the active function. +func (c *Context) VFNMSUB132SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMSUB132SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132SD xmm xmm xmm +// VFNMSUB132SD m64 xmm xmm +// Construct and append a VFNMSUB132SD instruction to the active function. +// Operates on the global context. +func VFNMSUB132SD(mx, x, x1 operand.Op) { ctx.VFNMSUB132SD(mx, x, x1) } + +// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132SS xmm xmm xmm +// VFNMSUB132SS m32 xmm xmm +// Construct and append a VFNMSUB132SS instruction to the active function. +func (c *Context) VFNMSUB132SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMSUB132SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132SS xmm xmm xmm +// VFNMSUB132SS m32 xmm xmm +// Construct and append a VFNMSUB132SS instruction to the active function. +// Operates on the global context. +func VFNMSUB132SS(mx, x, x1 operand.Op) { ctx.VFNMSUB132SS(mx, x, x1) } + +// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213PD xmm xmm xmm +// VFNMSUB213PD m128 xmm xmm +// VFNMSUB213PD ymm ymm ymm +// VFNMSUB213PD m256 ymm ymm +// Construct and append a VFNMSUB213PD instruction to the active function. +func (c *Context) VFNMSUB213PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMSUB213PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213PD xmm xmm xmm +// VFNMSUB213PD m128 xmm xmm +// VFNMSUB213PD ymm ymm ymm +// VFNMSUB213PD m256 ymm ymm +// Construct and append a VFNMSUB213PD instruction to the active function. +// Operates on the global context. +func VFNMSUB213PD(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB213PD(mxy, xy, xy1) } + +// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213PS xmm xmm xmm +// VFNMSUB213PS m128 xmm xmm +// VFNMSUB213PS ymm ymm ymm +// VFNMSUB213PS m256 ymm ymm +// Construct and append a VFNMSUB213PS instruction to the active function. +func (c *Context) VFNMSUB213PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMSUB213PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB213PS xmm xmm xmm +// VFNMSUB213PS m128 xmm xmm +// VFNMSUB213PS ymm ymm ymm +// VFNMSUB213PS m256 ymm ymm +// Construct and append a VFNMSUB213PS instruction to the active function. +// Operates on the global context. +func VFNMSUB213PS(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB213PS(mxy, xy, xy1) } + +// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213SD xmm xmm xmm +// VFNMSUB213SD m64 xmm xmm +// Construct and append a VFNMSUB213SD instruction to the active function. +func (c *Context) VFNMSUB213SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMSUB213SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213SD xmm xmm xmm +// VFNMSUB213SD m64 xmm xmm +// Construct and append a VFNMSUB213SD instruction to the active function. +// Operates on the global context. +func VFNMSUB213SD(mx, x, x1 operand.Op) { ctx.VFNMSUB213SD(mx, x, x1) } + +// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213SS xmm xmm xmm +// VFNMSUB213SS m32 xmm xmm +// Construct and append a VFNMSUB213SS instruction to the active function. +func (c *Context) VFNMSUB213SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMSUB213SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213SS xmm xmm xmm +// VFNMSUB213SS m32 xmm xmm +// Construct and append a VFNMSUB213SS instruction to the active function. +// Operates on the global context. +func VFNMSUB213SS(mx, x, x1 operand.Op) { ctx.VFNMSUB213SS(mx, x, x1) } + +// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231PD xmm xmm xmm +// VFNMSUB231PD m128 xmm xmm +// VFNMSUB231PD ymm ymm ymm +// VFNMSUB231PD m256 ymm ymm +// Construct and append a VFNMSUB231PD instruction to the active function. +func (c *Context) VFNMSUB231PD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMSUB231PD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231PD xmm xmm xmm +// VFNMSUB231PD m128 xmm xmm +// VFNMSUB231PD ymm ymm ymm +// VFNMSUB231PD m256 ymm ymm +// Construct and append a VFNMSUB231PD instruction to the active function. +// Operates on the global context. +func VFNMSUB231PD(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB231PD(mxy, xy, xy1) } + +// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231PS xmm xmm xmm +// VFNMSUB231PS m128 xmm xmm +// VFNMSUB231PS ymm ymm ymm +// VFNMSUB231PS m256 ymm ymm +// Construct and append a VFNMSUB231PS instruction to the active function. +func (c *Context) VFNMSUB231PS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VFNMSUB231PS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB231PS xmm xmm xmm +// VFNMSUB231PS m128 xmm xmm +// VFNMSUB231PS ymm ymm ymm +// VFNMSUB231PS m256 ymm ymm +// Construct and append a VFNMSUB231PS instruction to the active function. +// Operates on the global context. +func VFNMSUB231PS(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB231PS(mxy, xy, xy1) } + +// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231SD xmm xmm xmm +// VFNMSUB231SD m64 xmm xmm +// Construct and append a VFNMSUB231SD instruction to the active function. +func (c *Context) VFNMSUB231SD(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMSUB231SD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231SD xmm xmm xmm +// VFNMSUB231SD m64 xmm xmm +// Construct and append a VFNMSUB231SD instruction to the active function. +// Operates on the global context. +func VFNMSUB231SD(mx, x, x1 operand.Op) { ctx.VFNMSUB231SD(mx, x, x1) } + +// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231SS xmm xmm xmm +// VFNMSUB231SS m32 xmm xmm +// Construct and append a VFNMSUB231SS instruction to the active function. +func (c *Context) VFNMSUB231SS(mx, x, x1 operand.Op) { + if inst, err := x86.VFNMSUB231SS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231SS xmm xmm xmm +// VFNMSUB231SS m32 xmm xmm +// Construct and append a VFNMSUB231SS instruction to the active function. +// Operates on the global context. +func VFNMSUB231SS(mx, x, x1 operand.Op) { ctx.VFNMSUB231SS(mx, x, x1) } + +// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices. +// +// Forms: +// +// VGATHERDPD xmm vm32x xmm +// VGATHERDPD ymm vm32x ymm +// Construct and append a VGATHERDPD instruction to the active function. +func (c *Context) VGATHERDPD(xy, v, xy1 operand.Op) { + if inst, err := x86.VGATHERDPD(xy, v, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices. +// +// Forms: +// +// VGATHERDPD xmm vm32x xmm +// VGATHERDPD ymm vm32x ymm +// Construct and append a VGATHERDPD instruction to the active function. +// Operates on the global context. +func VGATHERDPD(xy, v, xy1 operand.Op) { ctx.VGATHERDPD(xy, v, xy1) } + +// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices. +// +// Forms: +// +// VGATHERDPS xmm vm32x xmm +// VGATHERDPS ymm vm32y ymm +// Construct and append a VGATHERDPS instruction to the active function. +func (c *Context) VGATHERDPS(xy, v, xy1 operand.Op) { + if inst, err := x86.VGATHERDPS(xy, v, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices. +// +// Forms: +// +// VGATHERDPS xmm vm32x xmm +// VGATHERDPS ymm vm32y ymm +// Construct and append a VGATHERDPS instruction to the active function. +// Operates on the global context. 
+func VGATHERDPS(xy, v, xy1 operand.Op) { ctx.VGATHERDPS(xy, v, xy1) } + +// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices. +// +// Forms: +// +// VGATHERQPD xmm vm64x xmm +// VGATHERQPD ymm vm64y ymm +// Construct and append a VGATHERQPD instruction to the active function. +func (c *Context) VGATHERQPD(xy, v, xy1 operand.Op) { + if inst, err := x86.VGATHERQPD(xy, v, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices. +// +// Forms: +// +// VGATHERQPD xmm vm64x xmm +// VGATHERQPD ymm vm64y ymm +// Construct and append a VGATHERQPD instruction to the active function. +// Operates on the global context. +func VGATHERQPD(xy, v, xy1 operand.Op) { ctx.VGATHERQPD(xy, v, xy1) } + +// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices. +// +// Forms: +// +// VGATHERQPS xmm vm64x xmm +// VGATHERQPS xmm vm64y xmm +// Construct and append a VGATHERQPS instruction to the active function. +func (c *Context) VGATHERQPS(x, v, x1 operand.Op) { + if inst, err := x86.VGATHERQPS(x, v, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices. +// +// Forms: +// +// VGATHERQPS xmm vm64x xmm +// VGATHERQPS xmm vm64y xmm +// Construct and append a VGATHERQPS instruction to the active function. +// Operates on the global context. +func VGATHERQPS(x, v, x1 operand.Op) { ctx.VGATHERQPS(x, v, x1) } + +// VHADDPD: Packed Double-FP Horizontal Add. +// +// Forms: +// +// VHADDPD xmm xmm xmm +// VHADDPD m128 xmm xmm +// VHADDPD ymm ymm ymm +// VHADDPD m256 ymm ymm +// Construct and append a VHADDPD instruction to the active function. +func (c *Context) VHADDPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VHADDPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VHADDPD: Packed Double-FP Horizontal Add. +// +// Forms: +// +// VHADDPD xmm xmm xmm +// VHADDPD m128 xmm xmm +// VHADDPD ymm ymm ymm +// VHADDPD m256 ymm ymm +// Construct and append a VHADDPD instruction to the active function. +// Operates on the global context. +func VHADDPD(mxy, xy, xy1 operand.Op) { ctx.VHADDPD(mxy, xy, xy1) } + +// VHADDPS: Packed Single-FP Horizontal Add. +// +// Forms: +// +// VHADDPS xmm xmm xmm +// VHADDPS m128 xmm xmm +// VHADDPS ymm ymm ymm +// VHADDPS m256 ymm ymm +// Construct and append a VHADDPS instruction to the active function. +func (c *Context) VHADDPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VHADDPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VHADDPS: Packed Single-FP Horizontal Add. +// +// Forms: +// +// VHADDPS xmm xmm xmm +// VHADDPS m128 xmm xmm +// VHADDPS ymm ymm ymm +// VHADDPS m256 ymm ymm +// Construct and append a VHADDPS instruction to the active function. +// Operates on the global context. +func VHADDPS(mxy, xy, xy1 operand.Op) { ctx.VHADDPS(mxy, xy, xy1) } + +// VHSUBPD: Packed Double-FP Horizontal Subtract. +// +// Forms: +// +// VHSUBPD xmm xmm xmm +// VHSUBPD m128 xmm xmm +// VHSUBPD ymm ymm ymm +// VHSUBPD m256 ymm ymm +// Construct and append a VHSUBPD instruction to the active function. 
+func (c *Context) VHSUBPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VHSUBPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VHSUBPD: Packed Double-FP Horizontal Subtract. +// +// Forms: +// +// VHSUBPD xmm xmm xmm +// VHSUBPD m128 xmm xmm +// VHSUBPD ymm ymm ymm +// VHSUBPD m256 ymm ymm +// Construct and append a VHSUBPD instruction to the active function. +// Operates on the global context. +func VHSUBPD(mxy, xy, xy1 operand.Op) { ctx.VHSUBPD(mxy, xy, xy1) } + +// VHSUBPS: Packed Single-FP Horizontal Subtract. +// +// Forms: +// +// VHSUBPS xmm xmm xmm +// VHSUBPS m128 xmm xmm +// VHSUBPS ymm ymm ymm +// VHSUBPS m256 ymm ymm +// Construct and append a VHSUBPS instruction to the active function. +func (c *Context) VHSUBPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VHSUBPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VHSUBPS: Packed Single-FP Horizontal Subtract. +// +// Forms: +// +// VHSUBPS xmm xmm xmm +// VHSUBPS m128 xmm xmm +// VHSUBPS ymm ymm ymm +// VHSUBPS m256 ymm ymm +// Construct and append a VHSUBPS instruction to the active function. +// Operates on the global context. +func VHSUBPS(mxy, xy, xy1 operand.Op) { ctx.VHSUBPS(mxy, xy, xy1) } + +// VINSERTF128: Insert Packed Floating-Point Values. +// +// Forms: +// +// VINSERTF128 imm8 xmm ymm ymm +// VINSERTF128 imm8 m128 ymm ymm +// Construct and append a VINSERTF128 instruction to the active function. +func (c *Context) VINSERTF128(i, mx, y, y1 operand.Op) { + if inst, err := x86.VINSERTF128(i, mx, y, y1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VINSERTF128: Insert Packed Floating-Point Values. +// +// Forms: +// +// VINSERTF128 imm8 xmm ymm ymm +// VINSERTF128 imm8 m128 ymm ymm +// Construct and append a VINSERTF128 instruction to the active function. +// Operates on the global context. +func VINSERTF128(i, mx, y, y1 operand.Op) { ctx.VINSERTF128(i, mx, y, y1) } + +// VINSERTI128: Insert Packed Integer Values. +// +// Forms: +// +// VINSERTI128 imm8 xmm ymm ymm +// VINSERTI128 imm8 m128 ymm ymm +// Construct and append a VINSERTI128 instruction to the active function. +func (c *Context) VINSERTI128(i, mx, y, y1 operand.Op) { + if inst, err := x86.VINSERTI128(i, mx, y, y1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VINSERTI128: Insert Packed Integer Values. +// +// Forms: +// +// VINSERTI128 imm8 xmm ymm ymm +// VINSERTI128 imm8 m128 ymm ymm +// Construct and append a VINSERTI128 instruction to the active function. +// Operates on the global context. +func VINSERTI128(i, mx, y, y1 operand.Op) { ctx.VINSERTI128(i, mx, y, y1) } + +// VINSERTPS: Insert Packed Single Precision Floating-Point Value. +// +// Forms: +// +// VINSERTPS imm8 xmm xmm xmm +// VINSERTPS imm8 m32 xmm xmm +// Construct and append a VINSERTPS instruction to the active function. +func (c *Context) VINSERTPS(i, mx, x, x1 operand.Op) { + if inst, err := x86.VINSERTPS(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VINSERTPS: Insert Packed Single Precision Floating-Point Value. +// +// Forms: +// +// VINSERTPS imm8 xmm xmm xmm +// VINSERTPS imm8 m32 xmm xmm +// Construct and append a VINSERTPS instruction to the active function. +// Operates on the global context. +func VINSERTPS(i, mx, x, x1 operand.Op) { ctx.VINSERTPS(i, mx, x, x1) } + +// VLDDQU: Load Unaligned Integer 128 Bits. 
+// +// Forms: +// +// VLDDQU m128 xmm +// VLDDQU m256 ymm +// Construct and append a VLDDQU instruction to the active function. +func (c *Context) VLDDQU(m, xy operand.Op) { + if inst, err := x86.VLDDQU(m, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VLDDQU: Load Unaligned Integer 128 Bits. +// +// Forms: +// +// VLDDQU m128 xmm +// VLDDQU m256 ymm +// Construct and append a VLDDQU instruction to the active function. +// Operates on the global context. +func VLDDQU(m, xy operand.Op) { ctx.VLDDQU(m, xy) } + +// VLDMXCSR: Load MXCSR Register. +// +// Forms: +// +// VLDMXCSR m32 +// Construct and append a VLDMXCSR instruction to the active function. +func (c *Context) VLDMXCSR(m operand.Op) { + if inst, err := x86.VLDMXCSR(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VLDMXCSR: Load MXCSR Register. +// +// Forms: +// +// VLDMXCSR m32 +// Construct and append a VLDMXCSR instruction to the active function. +// Operates on the global context. +func VLDMXCSR(m operand.Op) { ctx.VLDMXCSR(m) } + +// VMASKMOVDQU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// VMASKMOVDQU xmm xmm +// Construct and append a VMASKMOVDQU instruction to the active function. +func (c *Context) VMASKMOVDQU(x, x1 operand.Op) { + if inst, err := x86.VMASKMOVDQU(x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMASKMOVDQU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// VMASKMOVDQU xmm xmm +// Construct and append a VMASKMOVDQU instruction to the active function. +// Operates on the global context. +func VMASKMOVDQU(x, x1 operand.Op) { ctx.VMASKMOVDQU(x, x1) } + +// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMASKMOVPD m128 xmm xmm +// VMASKMOVPD m256 ymm ymm +// VMASKMOVPD xmm xmm m128 +// VMASKMOVPD ymm ymm m256 +// Construct and append a VMASKMOVPD instruction to the active function. +func (c *Context) VMASKMOVPD(mxy, xy, mxy1 operand.Op) { + if inst, err := x86.VMASKMOVPD(mxy, xy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMASKMOVPD m128 xmm xmm +// VMASKMOVPD m256 ymm ymm +// VMASKMOVPD xmm xmm m128 +// VMASKMOVPD ymm ymm m256 +// Construct and append a VMASKMOVPD instruction to the active function. +// Operates on the global context. +func VMASKMOVPD(mxy, xy, mxy1 operand.Op) { ctx.VMASKMOVPD(mxy, xy, mxy1) } + +// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMASKMOVPS m128 xmm xmm +// VMASKMOVPS m256 ymm ymm +// VMASKMOVPS xmm xmm m128 +// VMASKMOVPS ymm ymm m256 +// Construct and append a VMASKMOVPS instruction to the active function. +func (c *Context) VMASKMOVPS(mxy, xy, mxy1 operand.Op) { + if inst, err := x86.VMASKMOVPS(mxy, xy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMASKMOVPS m128 xmm xmm +// VMASKMOVPS m256 ymm ymm +// VMASKMOVPS xmm xmm m128 +// VMASKMOVPS ymm ymm m256 +// Construct and append a VMASKMOVPS instruction to the active function. +// Operates on the global context. +func VMASKMOVPS(mxy, xy, mxy1 operand.Op) { ctx.VMASKMOVPS(mxy, xy, mxy1) } + +// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VMAXPD xmm xmm xmm +// VMAXPD m128 xmm xmm +// VMAXPD ymm ymm ymm +// VMAXPD m256 ymm ymm +// Construct and append a VMAXPD instruction to the active function. +func (c *Context) VMAXPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMAXPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMAXPD xmm xmm xmm +// VMAXPD m128 xmm xmm +// VMAXPD ymm ymm ymm +// VMAXPD m256 ymm ymm +// Construct and append a VMAXPD instruction to the active function. +// Operates on the global context. +func VMAXPD(mxy, xy, xy1 operand.Op) { ctx.VMAXPD(mxy, xy, xy1) } + +// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMAXPS xmm xmm xmm +// VMAXPS m128 xmm xmm +// VMAXPS ymm ymm ymm +// VMAXPS m256 ymm ymm +// Construct and append a VMAXPS instruction to the active function. +func (c *Context) VMAXPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMAXPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMAXPS xmm xmm xmm +// VMAXPS m128 xmm xmm +// VMAXPS ymm ymm ymm +// VMAXPS m256 ymm ymm +// Construct and append a VMAXPS instruction to the active function. +// Operates on the global context. +func VMAXPS(mxy, xy, xy1 operand.Op) { ctx.VMAXPS(mxy, xy, xy1) } + +// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMAXSD xmm xmm xmm +// VMAXSD m64 xmm xmm +// Construct and append a VMAXSD instruction to the active function. +func (c *Context) VMAXSD(mx, x, x1 operand.Op) { + if inst, err := x86.VMAXSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMAXSD xmm xmm xmm +// VMAXSD m64 xmm xmm +// Construct and append a VMAXSD instruction to the active function. +// Operates on the global context. +func VMAXSD(mx, x, x1 operand.Op) { ctx.VMAXSD(mx, x, x1) } + +// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VMAXSS xmm xmm xmm +// VMAXSS m32 xmm xmm +// Construct and append a VMAXSS instruction to the active function. +func (c *Context) VMAXSS(mx, x, x1 operand.Op) { + if inst, err := x86.VMAXSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VMAXSS xmm xmm xmm +// VMAXSS m32 xmm xmm +// Construct and append a VMAXSS instruction to the active function. +// Operates on the global context. +func VMAXSS(mx, x, x1 operand.Op) { ctx.VMAXSS(mx, x, x1) } + +// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMINPD xmm xmm xmm +// VMINPD m128 xmm xmm +// VMINPD ymm ymm ymm +// VMINPD m256 ymm ymm +// Construct and append a VMINPD instruction to the active function. +func (c *Context) VMINPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMINPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMINPD xmm xmm xmm +// VMINPD m128 xmm xmm +// VMINPD ymm ymm ymm +// VMINPD m256 ymm ymm +// Construct and append a VMINPD instruction to the active function. 
+// Operates on the global context. +func VMINPD(mxy, xy, xy1 operand.Op) { ctx.VMINPD(mxy, xy, xy1) } + +// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMINPS xmm xmm xmm +// VMINPS m128 xmm xmm +// VMINPS ymm ymm ymm +// VMINPS m256 ymm ymm +// Construct and append a VMINPS instruction to the active function. +func (c *Context) VMINPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMINPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMINPS xmm xmm xmm +// VMINPS m128 xmm xmm +// VMINPS ymm ymm ymm +// VMINPS m256 ymm ymm +// Construct and append a VMINPS instruction to the active function. +// Operates on the global context. +func VMINPS(mxy, xy, xy1 operand.Op) { ctx.VMINPS(mxy, xy, xy1) } + +// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMINSD xmm xmm xmm +// VMINSD m64 xmm xmm +// Construct and append a VMINSD instruction to the active function. +func (c *Context) VMINSD(mx, x, x1 operand.Op) { + if inst, err := x86.VMINSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMINSD xmm xmm xmm +// VMINSD m64 xmm xmm +// Construct and append a VMINSD instruction to the active function. +// Operates on the global context. +func VMINSD(mx, x, x1 operand.Op) { ctx.VMINSD(mx, x, x1) } + +// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VMINSS xmm xmm xmm +// VMINSS m32 xmm xmm +// Construct and append a VMINSS instruction to the active function. +func (c *Context) VMINSS(mx, x, x1 operand.Op) { + if inst, err := x86.VMINSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VMINSS xmm xmm xmm +// VMINSS m32 xmm xmm +// Construct and append a VMINSS instruction to the active function. +// Operates on the global context. +func VMINSS(mx, x, x1 operand.Op) { ctx.VMINSS(mx, x, x1) } + +// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMOVAPD xmm xmm +// VMOVAPD m128 xmm +// VMOVAPD ymm ymm +// VMOVAPD m256 ymm +// VMOVAPD xmm m128 +// VMOVAPD ymm m256 +// Construct and append a VMOVAPD instruction to the active function. +func (c *Context) VMOVAPD(mxy, mxy1 operand.Op) { + if inst, err := x86.VMOVAPD(mxy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMOVAPD xmm xmm +// VMOVAPD m128 xmm +// VMOVAPD ymm ymm +// VMOVAPD m256 ymm +// VMOVAPD xmm m128 +// VMOVAPD ymm m256 +// Construct and append a VMOVAPD instruction to the active function. +// Operates on the global context. +func VMOVAPD(mxy, mxy1 operand.Op) { ctx.VMOVAPD(mxy, mxy1) } + +// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVAPS xmm xmm +// VMOVAPS m128 xmm +// VMOVAPS ymm ymm +// VMOVAPS m256 ymm +// VMOVAPS xmm m128 +// VMOVAPS ymm m256 +// Construct and append a VMOVAPS instruction to the active function. 
+func (c *Context) VMOVAPS(mxy, mxy1 operand.Op) { + if inst, err := x86.VMOVAPS(mxy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVAPS xmm xmm +// VMOVAPS m128 xmm +// VMOVAPS ymm ymm +// VMOVAPS m256 ymm +// VMOVAPS xmm m128 +// VMOVAPS ymm m256 +// Construct and append a VMOVAPS instruction to the active function. +// Operates on the global context. +func VMOVAPS(mxy, mxy1 operand.Op) { ctx.VMOVAPS(mxy, mxy1) } + +// VMOVD: Move Doubleword. +// +// Forms: +// +// VMOVD xmm r32 +// VMOVD r32 xmm +// VMOVD m32 xmm +// VMOVD xmm m32 +// Construct and append a VMOVD instruction to the active function. +func (c *Context) VMOVD(mrx, mrx1 operand.Op) { + if inst, err := x86.VMOVD(mrx, mrx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVD: Move Doubleword. +// +// Forms: +// +// VMOVD xmm r32 +// VMOVD r32 xmm +// VMOVD m32 xmm +// VMOVD xmm m32 +// Construct and append a VMOVD instruction to the active function. +// Operates on the global context. +func VMOVD(mrx, mrx1 operand.Op) { ctx.VMOVD(mrx, mrx1) } + +// VMOVDDUP: Move One Double-FP and Duplicate. +// +// Forms: +// +// VMOVDDUP xmm xmm +// VMOVDDUP m64 xmm +// VMOVDDUP ymm ymm +// VMOVDDUP m256 ymm +// Construct and append a VMOVDDUP instruction to the active function. +func (c *Context) VMOVDDUP(mxy, xy operand.Op) { + if inst, err := x86.VMOVDDUP(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVDDUP: Move One Double-FP and Duplicate. +// +// Forms: +// +// VMOVDDUP xmm xmm +// VMOVDDUP m64 xmm +// VMOVDDUP ymm ymm +// VMOVDDUP m256 ymm +// Construct and append a VMOVDDUP instruction to the active function. +// Operates on the global context. +func VMOVDDUP(mxy, xy operand.Op) { ctx.VMOVDDUP(mxy, xy) } + +// VMOVDQA: Move Aligned Double Quadword. +// +// Forms: +// +// VMOVDQA xmm xmm +// VMOVDQA m128 xmm +// VMOVDQA ymm ymm +// VMOVDQA m256 ymm +// VMOVDQA xmm m128 +// VMOVDQA ymm m256 +// Construct and append a VMOVDQA instruction to the active function. +func (c *Context) VMOVDQA(mxy, mxy1 operand.Op) { + if inst, err := x86.VMOVDQA(mxy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVDQA: Move Aligned Double Quadword. +// +// Forms: +// +// VMOVDQA xmm xmm +// VMOVDQA m128 xmm +// VMOVDQA ymm ymm +// VMOVDQA m256 ymm +// VMOVDQA xmm m128 +// VMOVDQA ymm m256 +// Construct and append a VMOVDQA instruction to the active function. +// Operates on the global context. +func VMOVDQA(mxy, mxy1 operand.Op) { ctx.VMOVDQA(mxy, mxy1) } + +// VMOVDQU: Move Unaligned Double Quadword. +// +// Forms: +// +// VMOVDQU xmm xmm +// VMOVDQU m128 xmm +// VMOVDQU ymm ymm +// VMOVDQU m256 ymm +// VMOVDQU xmm m128 +// VMOVDQU ymm m256 +// Construct and append a VMOVDQU instruction to the active function. +func (c *Context) VMOVDQU(mxy, mxy1 operand.Op) { + if inst, err := x86.VMOVDQU(mxy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVDQU: Move Unaligned Double Quadword. +// +// Forms: +// +// VMOVDQU xmm xmm +// VMOVDQU m128 xmm +// VMOVDQU ymm ymm +// VMOVDQU m256 ymm +// VMOVDQU xmm m128 +// VMOVDQU ymm m256 +// Construct and append a VMOVDQU instruction to the active function. +// Operates on the global context. +func VMOVDQU(mxy, mxy1 operand.Op) { ctx.VMOVDQU(mxy, mxy1) } + +// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low. 
+// +// Forms: +// +// VMOVHLPS xmm xmm xmm +// Construct and append a VMOVHLPS instruction to the active function. +func (c *Context) VMOVHLPS(x, x1, x2 operand.Op) { + if inst, err := x86.VMOVHLPS(x, x1, x2); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low. +// +// Forms: +// +// VMOVHLPS xmm xmm xmm +// Construct and append a VMOVHLPS instruction to the active function. +// Operates on the global context. +func VMOVHLPS(x, x1, x2 operand.Op) { ctx.VMOVHLPS(x, x1, x2) } + +// VMOVHPD: Move High Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVHPD xmm m64 +// VMOVHPD m64 xmm xmm +// Construct and append a VMOVHPD instruction to the active function. +func (c *Context) VMOVHPD(ops ...operand.Op) { + if inst, err := x86.VMOVHPD(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVHPD: Move High Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVHPD xmm m64 +// VMOVHPD m64 xmm xmm +// Construct and append a VMOVHPD instruction to the active function. +// Operates on the global context. +func VMOVHPD(ops ...operand.Op) { ctx.VMOVHPD(ops...) } + +// VMOVHPS: Move High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVHPS xmm m64 +// VMOVHPS m64 xmm xmm +// Construct and append a VMOVHPS instruction to the active function. +func (c *Context) VMOVHPS(ops ...operand.Op) { + if inst, err := x86.VMOVHPS(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVHPS: Move High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVHPS xmm m64 +// VMOVHPS m64 xmm xmm +// Construct and append a VMOVHPS instruction to the active function. +// Operates on the global context. +func VMOVHPS(ops ...operand.Op) { ctx.VMOVHPS(ops...) } + +// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High. +// +// Forms: +// +// VMOVLHPS xmm xmm xmm +// Construct and append a VMOVLHPS instruction to the active function. +func (c *Context) VMOVLHPS(x, x1, x2 operand.Op) { + if inst, err := x86.VMOVLHPS(x, x1, x2); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High. +// +// Forms: +// +// VMOVLHPS xmm xmm xmm +// Construct and append a VMOVLHPS instruction to the active function. +// Operates on the global context. +func VMOVLHPS(x, x1, x2 operand.Op) { ctx.VMOVLHPS(x, x1, x2) } + +// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVLPD xmm m64 +// VMOVLPD m64 xmm xmm +// Construct and append a VMOVLPD instruction to the active function. +func (c *Context) VMOVLPD(ops ...operand.Op) { + if inst, err := x86.VMOVLPD(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVLPD xmm m64 +// VMOVLPD m64 xmm xmm +// Construct and append a VMOVLPD instruction to the active function. +// Operates on the global context. +func VMOVLPD(ops ...operand.Op) { ctx.VMOVLPD(ops...) } + +// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVLPS xmm m64 +// VMOVLPS m64 xmm xmm +// Construct and append a VMOVLPS instruction to the active function. 
+func (c *Context) VMOVLPS(ops ...operand.Op) { + if inst, err := x86.VMOVLPS(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVLPS xmm m64 +// VMOVLPS m64 xmm xmm +// Construct and append a VMOVLPS instruction to the active function. +// Operates on the global context. +func VMOVLPS(ops ...operand.Op) { ctx.VMOVLPS(ops...) } + +// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask. +// +// Forms: +// +// VMOVMSKPD xmm r32 +// VMOVMSKPD ymm r32 +// Construct and append a VMOVMSKPD instruction to the active function. +func (c *Context) VMOVMSKPD(xy, r operand.Op) { + if inst, err := x86.VMOVMSKPD(xy, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask. +// +// Forms: +// +// VMOVMSKPD xmm r32 +// VMOVMSKPD ymm r32 +// Construct and append a VMOVMSKPD instruction to the active function. +// Operates on the global context. +func VMOVMSKPD(xy, r operand.Op) { ctx.VMOVMSKPD(xy, r) } + +// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask. +// +// Forms: +// +// VMOVMSKPS xmm r32 +// VMOVMSKPS ymm r32 +// Construct and append a VMOVMSKPS instruction to the active function. +func (c *Context) VMOVMSKPS(xy, r operand.Op) { + if inst, err := x86.VMOVMSKPS(xy, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask. +// +// Forms: +// +// VMOVMSKPS xmm r32 +// VMOVMSKPS ymm r32 +// Construct and append a VMOVMSKPS instruction to the active function. +// Operates on the global context. +func VMOVMSKPS(xy, r operand.Op) { ctx.VMOVMSKPS(xy, r) } + +// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTDQ xmm m128 +// VMOVNTDQ ymm m256 +// Construct and append a VMOVNTDQ instruction to the active function. +func (c *Context) VMOVNTDQ(xy, m operand.Op) { + if inst, err := x86.VMOVNTDQ(xy, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTDQ xmm m128 +// VMOVNTDQ ymm m256 +// Construct and append a VMOVNTDQ instruction to the active function. +// Operates on the global context. +func VMOVNTDQ(xy, m operand.Op) { ctx.VMOVNTDQ(xy, m) } + +// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint. +// +// Forms: +// +// VMOVNTDQA m128 xmm +// VMOVNTDQA m256 ymm +// Construct and append a VMOVNTDQA instruction to the active function. +func (c *Context) VMOVNTDQA(m, xy operand.Op) { + if inst, err := x86.VMOVNTDQA(m, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint. +// +// Forms: +// +// VMOVNTDQA m128 xmm +// VMOVNTDQA m256 ymm +// Construct and append a VMOVNTDQA instruction to the active function. +// Operates on the global context. +func VMOVNTDQA(m, xy operand.Op) { ctx.VMOVNTDQA(m, xy) } + +// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTPD xmm m128 +// VMOVNTPD ymm m256 +// Construct and append a VMOVNTPD instruction to the active function. 
+func (c *Context) VMOVNTPD(xy, m operand.Op) { + if inst, err := x86.VMOVNTPD(xy, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTPD xmm m128 +// VMOVNTPD ymm m256 +// Construct and append a VMOVNTPD instruction to the active function. +// Operates on the global context. +func VMOVNTPD(xy, m operand.Op) { ctx.VMOVNTPD(xy, m) } + +// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTPS xmm m128 +// VMOVNTPS ymm m256 +// Construct and append a VMOVNTPS instruction to the active function. +func (c *Context) VMOVNTPS(xy, m operand.Op) { + if inst, err := x86.VMOVNTPS(xy, m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTPS xmm m128 +// VMOVNTPS ymm m256 +// Construct and append a VMOVNTPS instruction to the active function. +// Operates on the global context. +func VMOVNTPS(xy, m operand.Op) { ctx.VMOVNTPS(xy, m) } + +// VMOVQ: Move Quadword. +// +// Forms: +// +// VMOVQ xmm r64 +// VMOVQ r64 xmm +// VMOVQ xmm xmm +// VMOVQ m64 xmm +// VMOVQ xmm m64 +// Construct and append a VMOVQ instruction to the active function. +func (c *Context) VMOVQ(mrx, mrx1 operand.Op) { + if inst, err := x86.VMOVQ(mrx, mrx1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVQ: Move Quadword. +// +// Forms: +// +// VMOVQ xmm r64 +// VMOVQ r64 xmm +// VMOVQ xmm xmm +// VMOVQ m64 xmm +// VMOVQ xmm m64 +// Construct and append a VMOVQ instruction to the active function. +// Operates on the global context. +func VMOVQ(mrx, mrx1 operand.Op) { ctx.VMOVQ(mrx, mrx1) } + +// VMOVSD: Move Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVSD m64 xmm +// VMOVSD xmm m64 +// VMOVSD xmm xmm xmm +// Construct and append a VMOVSD instruction to the active function. +func (c *Context) VMOVSD(ops ...operand.Op) { + if inst, err := x86.VMOVSD(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVSD: Move Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVSD m64 xmm +// VMOVSD xmm m64 +// VMOVSD xmm xmm xmm +// Construct and append a VMOVSD instruction to the active function. +// Operates on the global context. +func VMOVSD(ops ...operand.Op) { ctx.VMOVSD(ops...) } + +// VMOVSHDUP: Move Packed Single-FP High and Duplicate. +// +// Forms: +// +// VMOVSHDUP xmm xmm +// VMOVSHDUP m128 xmm +// VMOVSHDUP ymm ymm +// VMOVSHDUP m256 ymm +// Construct and append a VMOVSHDUP instruction to the active function. +func (c *Context) VMOVSHDUP(mxy, xy operand.Op) { + if inst, err := x86.VMOVSHDUP(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVSHDUP: Move Packed Single-FP High and Duplicate. +// +// Forms: +// +// VMOVSHDUP xmm xmm +// VMOVSHDUP m128 xmm +// VMOVSHDUP ymm ymm +// VMOVSHDUP m256 ymm +// Construct and append a VMOVSHDUP instruction to the active function. +// Operates on the global context. +func VMOVSHDUP(mxy, xy operand.Op) { ctx.VMOVSHDUP(mxy, xy) } + +// VMOVSLDUP: Move Packed Single-FP Low and Duplicate. +// +// Forms: +// +// VMOVSLDUP xmm xmm +// VMOVSLDUP m128 xmm +// VMOVSLDUP ymm ymm +// VMOVSLDUP m256 ymm +// Construct and append a VMOVSLDUP instruction to the active function. 
+func (c *Context) VMOVSLDUP(mxy, xy operand.Op) { + if inst, err := x86.VMOVSLDUP(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVSLDUP: Move Packed Single-FP Low and Duplicate. +// +// Forms: +// +// VMOVSLDUP xmm xmm +// VMOVSLDUP m128 xmm +// VMOVSLDUP ymm ymm +// VMOVSLDUP m256 ymm +// Construct and append a VMOVSLDUP instruction to the active function. +// Operates on the global context. +func VMOVSLDUP(mxy, xy operand.Op) { ctx.VMOVSLDUP(mxy, xy) } + +// VMOVSS: Move Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVSS m32 xmm +// VMOVSS xmm m32 +// VMOVSS xmm xmm xmm +// Construct and append a VMOVSS instruction to the active function. +func (c *Context) VMOVSS(ops ...operand.Op) { + if inst, err := x86.VMOVSS(ops...); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVSS: Move Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVSS m32 xmm +// VMOVSS xmm m32 +// VMOVSS xmm xmm xmm +// Construct and append a VMOVSS instruction to the active function. +// Operates on the global context. +func VMOVSS(ops ...operand.Op) { ctx.VMOVSS(ops...) } + +// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMOVUPD xmm xmm +// VMOVUPD m128 xmm +// VMOVUPD ymm ymm +// VMOVUPD m256 ymm +// VMOVUPD xmm m128 +// VMOVUPD ymm m256 +// Construct and append a VMOVUPD instruction to the active function. +func (c *Context) VMOVUPD(mxy, mxy1 operand.Op) { + if inst, err := x86.VMOVUPD(mxy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMOVUPD xmm xmm +// VMOVUPD m128 xmm +// VMOVUPD ymm ymm +// VMOVUPD m256 ymm +// VMOVUPD xmm m128 +// VMOVUPD ymm m256 +// Construct and append a VMOVUPD instruction to the active function. +// Operates on the global context. +func VMOVUPD(mxy, mxy1 operand.Op) { ctx.VMOVUPD(mxy, mxy1) } + +// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVUPS xmm xmm +// VMOVUPS m128 xmm +// VMOVUPS ymm ymm +// VMOVUPS m256 ymm +// VMOVUPS xmm m128 +// VMOVUPS ymm m256 +// Construct and append a VMOVUPS instruction to the active function. +func (c *Context) VMOVUPS(mxy, mxy1 operand.Op) { + if inst, err := x86.VMOVUPS(mxy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVUPS xmm xmm +// VMOVUPS m128 xmm +// VMOVUPS ymm ymm +// VMOVUPS m256 ymm +// VMOVUPS xmm m128 +// VMOVUPS ymm m256 +// Construct and append a VMOVUPS instruction to the active function. +// Operates on the global context. +func VMOVUPS(mxy, mxy1 operand.Op) { ctx.VMOVUPS(mxy, mxy1) } + +// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference. +// +// Forms: +// +// VMPSADBW imm8 xmm xmm xmm +// VMPSADBW imm8 m128 xmm xmm +// VMPSADBW imm8 ymm ymm ymm +// VMPSADBW imm8 m256 ymm ymm +// Construct and append a VMPSADBW instruction to the active function. +func (c *Context) VMPSADBW(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMPSADBW(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference. 
+// +// Forms: +// +// VMPSADBW imm8 xmm xmm xmm +// VMPSADBW imm8 m128 xmm xmm +// VMPSADBW imm8 ymm ymm ymm +// VMPSADBW imm8 m256 ymm ymm +// Construct and append a VMPSADBW instruction to the active function. +// Operates on the global context. +func VMPSADBW(i, mxy, xy, xy1 operand.Op) { ctx.VMPSADBW(i, mxy, xy, xy1) } + +// VMULPD: Multiply Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMULPD xmm xmm xmm +// VMULPD m128 xmm xmm +// VMULPD ymm ymm ymm +// VMULPD m256 ymm ymm +// Construct and append a VMULPD instruction to the active function. +func (c *Context) VMULPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMULPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMULPD: Multiply Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMULPD xmm xmm xmm +// VMULPD m128 xmm xmm +// VMULPD ymm ymm ymm +// VMULPD m256 ymm ymm +// Construct and append a VMULPD instruction to the active function. +// Operates on the global context. +func VMULPD(mxy, xy, xy1 operand.Op) { ctx.VMULPD(mxy, xy, xy1) } + +// VMULPS: Multiply Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMULPS xmm xmm xmm +// VMULPS m128 xmm xmm +// VMULPS ymm ymm ymm +// VMULPS m256 ymm ymm +// Construct and append a VMULPS instruction to the active function. +func (c *Context) VMULPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VMULPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMULPS: Multiply Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMULPS xmm xmm xmm +// VMULPS m128 xmm xmm +// VMULPS ymm ymm ymm +// VMULPS m256 ymm ymm +// Construct and append a VMULPS instruction to the active function. +// Operates on the global context. +func VMULPS(mxy, xy, xy1 operand.Op) { ctx.VMULPS(mxy, xy, xy1) } + +// VMULSD: Multiply Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VMULSD xmm xmm xmm +// VMULSD m64 xmm xmm +// Construct and append a VMULSD instruction to the active function. +func (c *Context) VMULSD(mx, x, x1 operand.Op) { + if inst, err := x86.VMULSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMULSD: Multiply Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VMULSD xmm xmm xmm +// VMULSD m64 xmm xmm +// Construct and append a VMULSD instruction to the active function. +// Operates on the global context. +func VMULSD(mx, x, x1 operand.Op) { ctx.VMULSD(mx, x, x1) } + +// VMULSS: Multiply Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VMULSS xmm xmm xmm +// VMULSS m32 xmm xmm +// Construct and append a VMULSS instruction to the active function. +func (c *Context) VMULSS(mx, x, x1 operand.Op) { + if inst, err := x86.VMULSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VMULSS: Multiply Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VMULSS xmm xmm xmm +// VMULSS m32 xmm xmm +// Construct and append a VMULSS instruction to the active function. +// Operates on the global context. +func VMULSS(mx, x, x1 operand.Op) { ctx.VMULSS(mx, x, x1) } + +// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values. +// +// Forms: +// +// VORPD xmm xmm xmm +// VORPD m128 xmm xmm +// VORPD ymm ymm ymm +// VORPD m256 ymm ymm +// Construct and append a VORPD instruction to the active function. 
+func (c *Context) VORPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VORPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values. +// +// Forms: +// +// VORPD xmm xmm xmm +// VORPD m128 xmm xmm +// VORPD ymm ymm ymm +// VORPD m256 ymm ymm +// Construct and append a VORPD instruction to the active function. +// Operates on the global context. +func VORPD(mxy, xy, xy1 operand.Op) { ctx.VORPD(mxy, xy, xy1) } + +// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values. +// +// Forms: +// +// VORPS xmm xmm xmm +// VORPS m128 xmm xmm +// VORPS ymm ymm ymm +// VORPS m256 ymm ymm +// Construct and append a VORPS instruction to the active function. +func (c *Context) VORPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VORPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values. +// +// Forms: +// +// VORPS xmm xmm xmm +// VORPS m128 xmm xmm +// VORPS ymm ymm ymm +// VORPS m256 ymm ymm +// Construct and append a VORPS instruction to the active function. +// Operates on the global context. +func VORPS(mxy, xy, xy1 operand.Op) { ctx.VORPS(mxy, xy, xy1) } + +// VPABSB: Packed Absolute Value of Byte Integers. +// +// Forms: +// +// VPABSB xmm xmm +// VPABSB m128 xmm +// VPABSB ymm ymm +// VPABSB m256 ymm +// Construct and append a VPABSB instruction to the active function. +func (c *Context) VPABSB(mxy, xy operand.Op) { + if inst, err := x86.VPABSB(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPABSB: Packed Absolute Value of Byte Integers. +// +// Forms: +// +// VPABSB xmm xmm +// VPABSB m128 xmm +// VPABSB ymm ymm +// VPABSB m256 ymm +// Construct and append a VPABSB instruction to the active function. +// Operates on the global context. +func VPABSB(mxy, xy operand.Op) { ctx.VPABSB(mxy, xy) } + +// VPABSD: Packed Absolute Value of Doubleword Integers. +// +// Forms: +// +// VPABSD xmm xmm +// VPABSD m128 xmm +// VPABSD ymm ymm +// VPABSD m256 ymm +// Construct and append a VPABSD instruction to the active function. +func (c *Context) VPABSD(mxy, xy operand.Op) { + if inst, err := x86.VPABSD(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPABSD: Packed Absolute Value of Doubleword Integers. +// +// Forms: +// +// VPABSD xmm xmm +// VPABSD m128 xmm +// VPABSD ymm ymm +// VPABSD m256 ymm +// Construct and append a VPABSD instruction to the active function. +// Operates on the global context. +func VPABSD(mxy, xy operand.Op) { ctx.VPABSD(mxy, xy) } + +// VPABSW: Packed Absolute Value of Word Integers. +// +// Forms: +// +// VPABSW xmm xmm +// VPABSW m128 xmm +// VPABSW ymm ymm +// VPABSW m256 ymm +// Construct and append a VPABSW instruction to the active function. +func (c *Context) VPABSW(mxy, xy operand.Op) { + if inst, err := x86.VPABSW(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPABSW: Packed Absolute Value of Word Integers. +// +// Forms: +// +// VPABSW xmm xmm +// VPABSW m128 xmm +// VPABSW ymm ymm +// VPABSW m256 ymm +// Construct and append a VPABSW instruction to the active function. +// Operates on the global context. +func VPABSW(mxy, xy operand.Op) { ctx.VPABSW(mxy, xy) } + +// VPACKSSDW: Pack Doublewords into Words with Signed Saturation. 
+// +// Forms: +// +// VPACKSSDW xmm xmm xmm +// VPACKSSDW m128 xmm xmm +// VPACKSSDW ymm ymm ymm +// VPACKSSDW m256 ymm ymm +// Construct and append a VPACKSSDW instruction to the active function. +func (c *Context) VPACKSSDW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPACKSSDW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPACKSSDW: Pack Doublewords into Words with Signed Saturation. +// +// Forms: +// +// VPACKSSDW xmm xmm xmm +// VPACKSSDW m128 xmm xmm +// VPACKSSDW ymm ymm ymm +// VPACKSSDW m256 ymm ymm +// Construct and append a VPACKSSDW instruction to the active function. +// Operates on the global context. +func VPACKSSDW(mxy, xy, xy1 operand.Op) { ctx.VPACKSSDW(mxy, xy, xy1) } + +// VPACKSSWB: Pack Words into Bytes with Signed Saturation. +// +// Forms: +// +// VPACKSSWB xmm xmm xmm +// VPACKSSWB m128 xmm xmm +// VPACKSSWB ymm ymm ymm +// VPACKSSWB m256 ymm ymm +// Construct and append a VPACKSSWB instruction to the active function. +func (c *Context) VPACKSSWB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPACKSSWB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPACKSSWB: Pack Words into Bytes with Signed Saturation. +// +// Forms: +// +// VPACKSSWB xmm xmm xmm +// VPACKSSWB m128 xmm xmm +// VPACKSSWB ymm ymm ymm +// VPACKSSWB m256 ymm ymm +// Construct and append a VPACKSSWB instruction to the active function. +// Operates on the global context. +func VPACKSSWB(mxy, xy, xy1 operand.Op) { ctx.VPACKSSWB(mxy, xy, xy1) } + +// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation. +// +// Forms: +// +// VPACKUSDW xmm xmm xmm +// VPACKUSDW m128 xmm xmm +// VPACKUSDW ymm ymm ymm +// VPACKUSDW m256 ymm ymm +// Construct and append a VPACKUSDW instruction to the active function. +func (c *Context) VPACKUSDW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPACKUSDW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation. +// +// Forms: +// +// VPACKUSDW xmm xmm xmm +// VPACKUSDW m128 xmm xmm +// VPACKUSDW ymm ymm ymm +// VPACKUSDW m256 ymm ymm +// Construct and append a VPACKUSDW instruction to the active function. +// Operates on the global context. +func VPACKUSDW(mxy, xy, xy1 operand.Op) { ctx.VPACKUSDW(mxy, xy, xy1) } + +// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation. +// +// Forms: +// +// VPACKUSWB xmm xmm xmm +// VPACKUSWB m128 xmm xmm +// VPACKUSWB ymm ymm ymm +// VPACKUSWB m256 ymm ymm +// Construct and append a VPACKUSWB instruction to the active function. +func (c *Context) VPACKUSWB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPACKUSWB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation. +// +// Forms: +// +// VPACKUSWB xmm xmm xmm +// VPACKUSWB m128 xmm xmm +// VPACKUSWB ymm ymm ymm +// VPACKUSWB m256 ymm ymm +// Construct and append a VPACKUSWB instruction to the active function. +// Operates on the global context. +func VPACKUSWB(mxy, xy, xy1 operand.Op) { ctx.VPACKUSWB(mxy, xy, xy1) } + +// VPADDB: Add Packed Byte Integers. +// +// Forms: +// +// VPADDB xmm xmm xmm +// VPADDB m128 xmm xmm +// VPADDB ymm ymm ymm +// VPADDB m256 ymm ymm +// Construct and append a VPADDB instruction to the active function. 
+func (c *Context) VPADDB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDB: Add Packed Byte Integers. +// +// Forms: +// +// VPADDB xmm xmm xmm +// VPADDB m128 xmm xmm +// VPADDB ymm ymm ymm +// VPADDB m256 ymm ymm +// Construct and append a VPADDB instruction to the active function. +// Operates on the global context. +func VPADDB(mxy, xy, xy1 operand.Op) { ctx.VPADDB(mxy, xy, xy1) } + +// VPADDD: Add Packed Doubleword Integers. +// +// Forms: +// +// VPADDD xmm xmm xmm +// VPADDD m128 xmm xmm +// VPADDD ymm ymm ymm +// VPADDD m256 ymm ymm +// Construct and append a VPADDD instruction to the active function. +func (c *Context) VPADDD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDD: Add Packed Doubleword Integers. +// +// Forms: +// +// VPADDD xmm xmm xmm +// VPADDD m128 xmm xmm +// VPADDD ymm ymm ymm +// VPADDD m256 ymm ymm +// Construct and append a VPADDD instruction to the active function. +// Operates on the global context. +func VPADDD(mxy, xy, xy1 operand.Op) { ctx.VPADDD(mxy, xy, xy1) } + +// VPADDQ: Add Packed Quadword Integers. +// +// Forms: +// +// VPADDQ xmm xmm xmm +// VPADDQ m128 xmm xmm +// VPADDQ ymm ymm ymm +// VPADDQ m256 ymm ymm +// Construct and append a VPADDQ instruction to the active function. +func (c *Context) VPADDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDQ: Add Packed Quadword Integers. +// +// Forms: +// +// VPADDQ xmm xmm xmm +// VPADDQ m128 xmm xmm +// VPADDQ ymm ymm ymm +// VPADDQ m256 ymm ymm +// Construct and append a VPADDQ instruction to the active function. +// Operates on the global context. +func VPADDQ(mxy, xy, xy1 operand.Op) { ctx.VPADDQ(mxy, xy, xy1) } + +// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// VPADDSB xmm xmm xmm +// VPADDSB m128 xmm xmm +// VPADDSB ymm ymm ymm +// VPADDSB m256 ymm ymm +// Construct and append a VPADDSB instruction to the active function. +func (c *Context) VPADDSB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDSB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// VPADDSB xmm xmm xmm +// VPADDSB m128 xmm xmm +// VPADDSB ymm ymm ymm +// VPADDSB m256 ymm ymm +// Construct and append a VPADDSB instruction to the active function. +// Operates on the global context. +func VPADDSB(mxy, xy, xy1 operand.Op) { ctx.VPADDSB(mxy, xy, xy1) } + +// VPADDSW: Add Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPADDSW xmm xmm xmm +// VPADDSW m128 xmm xmm +// VPADDSW ymm ymm ymm +// VPADDSW m256 ymm ymm +// Construct and append a VPADDSW instruction to the active function. +func (c *Context) VPADDSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDSW: Add Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPADDSW xmm xmm xmm +// VPADDSW m128 xmm xmm +// VPADDSW ymm ymm ymm +// VPADDSW m256 ymm ymm +// Construct and append a VPADDSW instruction to the active function. +// Operates on the global context. 
+func VPADDSW(mxy, xy, xy1 operand.Op) { ctx.VPADDSW(mxy, xy, xy1) } + +// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// VPADDUSB xmm xmm xmm +// VPADDUSB m128 xmm xmm +// VPADDUSB ymm ymm ymm +// VPADDUSB m256 ymm ymm +// Construct and append a VPADDUSB instruction to the active function. +func (c *Context) VPADDUSB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDUSB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// VPADDUSB xmm xmm xmm +// VPADDUSB m128 xmm xmm +// VPADDUSB ymm ymm ymm +// VPADDUSB m256 ymm ymm +// Construct and append a VPADDUSB instruction to the active function. +// Operates on the global context. +func VPADDUSB(mxy, xy, xy1 operand.Op) { ctx.VPADDUSB(mxy, xy, xy1) } + +// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// VPADDUSW xmm xmm xmm +// VPADDUSW m128 xmm xmm +// VPADDUSW ymm ymm ymm +// VPADDUSW m256 ymm ymm +// Construct and append a VPADDUSW instruction to the active function. +func (c *Context) VPADDUSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDUSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// VPADDUSW xmm xmm xmm +// VPADDUSW m128 xmm xmm +// VPADDUSW ymm ymm ymm +// VPADDUSW m256 ymm ymm +// Construct and append a VPADDUSW instruction to the active function. +// Operates on the global context. +func VPADDUSW(mxy, xy, xy1 operand.Op) { ctx.VPADDUSW(mxy, xy, xy1) } + +// VPADDW: Add Packed Word Integers. +// +// Forms: +// +// VPADDW xmm xmm xmm +// VPADDW m128 xmm xmm +// VPADDW ymm ymm ymm +// VPADDW m256 ymm ymm +// Construct and append a VPADDW instruction to the active function. +func (c *Context) VPADDW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPADDW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPADDW: Add Packed Word Integers. +// +// Forms: +// +// VPADDW xmm xmm xmm +// VPADDW m128 xmm xmm +// VPADDW ymm ymm ymm +// VPADDW m256 ymm ymm +// Construct and append a VPADDW instruction to the active function. +// Operates on the global context. +func VPADDW(mxy, xy, xy1 operand.Op) { ctx.VPADDW(mxy, xy, xy1) } + +// VPALIGNR: Packed Align Right. +// +// Forms: +// +// VPALIGNR imm8 xmm xmm xmm +// VPALIGNR imm8 m128 xmm xmm +// VPALIGNR imm8 ymm ymm ymm +// VPALIGNR imm8 m256 ymm ymm +// Construct and append a VPALIGNR instruction to the active function. +func (c *Context) VPALIGNR(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPALIGNR(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPALIGNR: Packed Align Right. +// +// Forms: +// +// VPALIGNR imm8 xmm xmm xmm +// VPALIGNR imm8 m128 xmm xmm +// VPALIGNR imm8 ymm ymm ymm +// VPALIGNR imm8 m256 ymm ymm +// Construct and append a VPALIGNR instruction to the active function. +// Operates on the global context. +func VPALIGNR(i, mxy, xy, xy1 operand.Op) { ctx.VPALIGNR(i, mxy, xy, xy1) } + +// VPAND: Packed Bitwise Logical AND. +// +// Forms: +// +// VPAND xmm xmm xmm +// VPAND m128 xmm xmm +// VPAND ymm ymm ymm +// VPAND m256 ymm ymm +// Construct and append a VPAND instruction to the active function. 
+func (c *Context) VPAND(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPAND(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPAND: Packed Bitwise Logical AND. +// +// Forms: +// +// VPAND xmm xmm xmm +// VPAND m128 xmm xmm +// VPAND ymm ymm ymm +// VPAND m256 ymm ymm +// Construct and append a VPAND instruction to the active function. +// Operates on the global context. +func VPAND(mxy, xy, xy1 operand.Op) { ctx.VPAND(mxy, xy, xy1) } + +// VPANDN: Packed Bitwise Logical AND NOT. +// +// Forms: +// +// VPANDN xmm xmm xmm +// VPANDN m128 xmm xmm +// VPANDN ymm ymm ymm +// VPANDN m256 ymm ymm +// Construct and append a VPANDN instruction to the active function. +func (c *Context) VPANDN(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPANDN(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPANDN: Packed Bitwise Logical AND NOT. +// +// Forms: +// +// VPANDN xmm xmm xmm +// VPANDN m128 xmm xmm +// VPANDN ymm ymm ymm +// VPANDN m256 ymm ymm +// Construct and append a VPANDN instruction to the active function. +// Operates on the global context. +func VPANDN(mxy, xy, xy1 operand.Op) { ctx.VPANDN(mxy, xy, xy1) } + +// VPAVGB: Average Packed Byte Integers. +// +// Forms: +// +// VPAVGB xmm xmm xmm +// VPAVGB m128 xmm xmm +// VPAVGB ymm ymm ymm +// VPAVGB m256 ymm ymm +// Construct and append a VPAVGB instruction to the active function. +func (c *Context) VPAVGB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPAVGB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPAVGB: Average Packed Byte Integers. +// +// Forms: +// +// VPAVGB xmm xmm xmm +// VPAVGB m128 xmm xmm +// VPAVGB ymm ymm ymm +// VPAVGB m256 ymm ymm +// Construct and append a VPAVGB instruction to the active function. +// Operates on the global context. +func VPAVGB(mxy, xy, xy1 operand.Op) { ctx.VPAVGB(mxy, xy, xy1) } + +// VPAVGW: Average Packed Word Integers. +// +// Forms: +// +// VPAVGW xmm xmm xmm +// VPAVGW m128 xmm xmm +// VPAVGW ymm ymm ymm +// VPAVGW m256 ymm ymm +// Construct and append a VPAVGW instruction to the active function. +func (c *Context) VPAVGW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPAVGW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPAVGW: Average Packed Word Integers. +// +// Forms: +// +// VPAVGW xmm xmm xmm +// VPAVGW m128 xmm xmm +// VPAVGW ymm ymm ymm +// VPAVGW m256 ymm ymm +// Construct and append a VPAVGW instruction to the active function. +// Operates on the global context. +func VPAVGW(mxy, xy, xy1 operand.Op) { ctx.VPAVGW(mxy, xy, xy1) } + +// VPBLENDD: Blend Packed Doublewords. +// +// Forms: +// +// VPBLENDD imm8 xmm xmm xmm +// VPBLENDD imm8 m128 xmm xmm +// VPBLENDD imm8 ymm ymm ymm +// VPBLENDD imm8 m256 ymm ymm +// Construct and append a VPBLENDD instruction to the active function. +func (c *Context) VPBLENDD(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPBLENDD(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBLENDD: Blend Packed Doublewords. +// +// Forms: +// +// VPBLENDD imm8 xmm xmm xmm +// VPBLENDD imm8 m128 xmm xmm +// VPBLENDD imm8 ymm ymm ymm +// VPBLENDD imm8 m256 ymm ymm +// Construct and append a VPBLENDD instruction to the active function. +// Operates on the global context. +func VPBLENDD(i, mxy, xy, xy1 operand.Op) { ctx.VPBLENDD(i, mxy, xy, xy1) } + +// VPBLENDVB: Variable Blend Packed Bytes. 
+// +// Forms: +// +// VPBLENDVB xmm xmm xmm xmm +// VPBLENDVB xmm m128 xmm xmm +// VPBLENDVB ymm ymm ymm ymm +// VPBLENDVB ymm m256 ymm ymm +// Construct and append a VPBLENDVB instruction to the active function. +func (c *Context) VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) { + if inst, err := x86.VPBLENDVB(xy, mxy, xy1, xy2); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBLENDVB: Variable Blend Packed Bytes. +// +// Forms: +// +// VPBLENDVB xmm xmm xmm xmm +// VPBLENDVB xmm m128 xmm xmm +// VPBLENDVB ymm ymm ymm ymm +// VPBLENDVB ymm m256 ymm ymm +// Construct and append a VPBLENDVB instruction to the active function. +// Operates on the global context. +func VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) { ctx.VPBLENDVB(xy, mxy, xy1, xy2) } + +// VPBLENDW: Blend Packed Words. +// +// Forms: +// +// VPBLENDW imm8 xmm xmm xmm +// VPBLENDW imm8 m128 xmm xmm +// VPBLENDW imm8 ymm ymm ymm +// VPBLENDW imm8 m256 ymm ymm +// Construct and append a VPBLENDW instruction to the active function. +func (c *Context) VPBLENDW(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPBLENDW(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBLENDW: Blend Packed Words. +// +// Forms: +// +// VPBLENDW imm8 xmm xmm xmm +// VPBLENDW imm8 m128 xmm xmm +// VPBLENDW imm8 ymm ymm ymm +// VPBLENDW imm8 m256 ymm ymm +// Construct and append a VPBLENDW instruction to the active function. +// Operates on the global context. +func VPBLENDW(i, mxy, xy, xy1 operand.Op) { ctx.VPBLENDW(i, mxy, xy, xy1) } + +// VPBROADCASTB: Broadcast Byte Integer. +// +// Forms: +// +// VPBROADCASTB xmm xmm +// VPBROADCASTB m8 xmm +// VPBROADCASTB xmm ymm +// VPBROADCASTB m8 ymm +// Construct and append a VPBROADCASTB instruction to the active function. +func (c *Context) VPBROADCASTB(mx, xy operand.Op) { + if inst, err := x86.VPBROADCASTB(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBROADCASTB: Broadcast Byte Integer. +// +// Forms: +// +// VPBROADCASTB xmm xmm +// VPBROADCASTB m8 xmm +// VPBROADCASTB xmm ymm +// VPBROADCASTB m8 ymm +// Construct and append a VPBROADCASTB instruction to the active function. +// Operates on the global context. +func VPBROADCASTB(mx, xy operand.Op) { ctx.VPBROADCASTB(mx, xy) } + +// VPBROADCASTD: Broadcast Doubleword Integer. +// +// Forms: +// +// VPBROADCASTD xmm xmm +// VPBROADCASTD m32 xmm +// VPBROADCASTD xmm ymm +// VPBROADCASTD m32 ymm +// Construct and append a VPBROADCASTD instruction to the active function. +func (c *Context) VPBROADCASTD(mx, xy operand.Op) { + if inst, err := x86.VPBROADCASTD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBROADCASTD: Broadcast Doubleword Integer. +// +// Forms: +// +// VPBROADCASTD xmm xmm +// VPBROADCASTD m32 xmm +// VPBROADCASTD xmm ymm +// VPBROADCASTD m32 ymm +// Construct and append a VPBROADCASTD instruction to the active function. +// Operates on the global context. +func VPBROADCASTD(mx, xy operand.Op) { ctx.VPBROADCASTD(mx, xy) } + +// VPBROADCASTQ: Broadcast Quadword Integer. +// +// Forms: +// +// VPBROADCASTQ xmm xmm +// VPBROADCASTQ m64 xmm +// VPBROADCASTQ xmm ymm +// VPBROADCASTQ m64 ymm +// Construct and append a VPBROADCASTQ instruction to the active function. +func (c *Context) VPBROADCASTQ(mx, xy operand.Op) { + if inst, err := x86.VPBROADCASTQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBROADCASTQ: Broadcast Quadword Integer. 
+// +// Forms: +// +// VPBROADCASTQ xmm xmm +// VPBROADCASTQ m64 xmm +// VPBROADCASTQ xmm ymm +// VPBROADCASTQ m64 ymm +// Construct and append a VPBROADCASTQ instruction to the active function. +// Operates on the global context. +func VPBROADCASTQ(mx, xy operand.Op) { ctx.VPBROADCASTQ(mx, xy) } + +// VPBROADCASTW: Broadcast Word Integer. +// +// Forms: +// +// VPBROADCASTW xmm xmm +// VPBROADCASTW m16 xmm +// VPBROADCASTW xmm ymm +// VPBROADCASTW m16 ymm +// Construct and append a VPBROADCASTW instruction to the active function. +func (c *Context) VPBROADCASTW(mx, xy operand.Op) { + if inst, err := x86.VPBROADCASTW(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPBROADCASTW: Broadcast Word Integer. +// +// Forms: +// +// VPBROADCASTW xmm xmm +// VPBROADCASTW m16 xmm +// VPBROADCASTW xmm ymm +// VPBROADCASTW m16 ymm +// Construct and append a VPBROADCASTW instruction to the active function. +// Operates on the global context. +func VPBROADCASTW(mx, xy operand.Op) { ctx.VPBROADCASTW(mx, xy) } + +// VPCLMULQDQ: Carry-Less Quadword Multiplication. +// +// Forms: +// +// VPCLMULQDQ imm8 xmm xmm xmm +// VPCLMULQDQ imm8 m128 xmm xmm +// Construct and append a VPCLMULQDQ instruction to the active function. +func (c *Context) VPCLMULQDQ(i, mx, x, x1 operand.Op) { + if inst, err := x86.VPCLMULQDQ(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCLMULQDQ: Carry-Less Quadword Multiplication. +// +// Forms: +// +// VPCLMULQDQ imm8 xmm xmm xmm +// VPCLMULQDQ imm8 m128 xmm xmm +// Construct and append a VPCLMULQDQ instruction to the active function. +// Operates on the global context. +func VPCLMULQDQ(i, mx, x, x1 operand.Op) { ctx.VPCLMULQDQ(i, mx, x, x1) } + +// VPCMPEQB: Compare Packed Byte Data for Equality. +// +// Forms: +// +// VPCMPEQB xmm xmm xmm +// VPCMPEQB m128 xmm xmm +// VPCMPEQB ymm ymm ymm +// VPCMPEQB m256 ymm ymm +// Construct and append a VPCMPEQB instruction to the active function. +func (c *Context) VPCMPEQB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPEQB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPEQB: Compare Packed Byte Data for Equality. +// +// Forms: +// +// VPCMPEQB xmm xmm xmm +// VPCMPEQB m128 xmm xmm +// VPCMPEQB ymm ymm ymm +// VPCMPEQB m256 ymm ymm +// Construct and append a VPCMPEQB instruction to the active function. +// Operates on the global context. +func VPCMPEQB(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQB(mxy, xy, xy1) } + +// VPCMPEQD: Compare Packed Doubleword Data for Equality. +// +// Forms: +// +// VPCMPEQD xmm xmm xmm +// VPCMPEQD m128 xmm xmm +// VPCMPEQD ymm ymm ymm +// VPCMPEQD m256 ymm ymm +// Construct and append a VPCMPEQD instruction to the active function. +func (c *Context) VPCMPEQD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPEQD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPEQD: Compare Packed Doubleword Data for Equality. +// +// Forms: +// +// VPCMPEQD xmm xmm xmm +// VPCMPEQD m128 xmm xmm +// VPCMPEQD ymm ymm ymm +// VPCMPEQD m256 ymm ymm +// Construct and append a VPCMPEQD instruction to the active function. +// Operates on the global context. +func VPCMPEQD(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQD(mxy, xy, xy1) } + +// VPCMPEQQ: Compare Packed Quadword Data for Equality. 
+// +// Forms: +// +// VPCMPEQQ xmm xmm xmm +// VPCMPEQQ m128 xmm xmm +// VPCMPEQQ ymm ymm ymm +// VPCMPEQQ m256 ymm ymm +// Construct and append a VPCMPEQQ instruction to the active function. +func (c *Context) VPCMPEQQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPEQQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPEQQ: Compare Packed Quadword Data for Equality. +// +// Forms: +// +// VPCMPEQQ xmm xmm xmm +// VPCMPEQQ m128 xmm xmm +// VPCMPEQQ ymm ymm ymm +// VPCMPEQQ m256 ymm ymm +// Construct and append a VPCMPEQQ instruction to the active function. +// Operates on the global context. +func VPCMPEQQ(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQQ(mxy, xy, xy1) } + +// VPCMPEQW: Compare Packed Word Data for Equality. +// +// Forms: +// +// VPCMPEQW xmm xmm xmm +// VPCMPEQW m128 xmm xmm +// VPCMPEQW ymm ymm ymm +// VPCMPEQW m256 ymm ymm +// Construct and append a VPCMPEQW instruction to the active function. +func (c *Context) VPCMPEQW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPEQW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPEQW: Compare Packed Word Data for Equality. +// +// Forms: +// +// VPCMPEQW xmm xmm xmm +// VPCMPEQW m128 xmm xmm +// VPCMPEQW ymm ymm ymm +// VPCMPEQW m256 ymm ymm +// Construct and append a VPCMPEQW instruction to the active function. +// Operates on the global context. +func VPCMPEQW(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQW(mxy, xy, xy1) } + +// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index. +// +// Forms: +// +// VPCMPESTRI imm8 xmm xmm +// VPCMPESTRI imm8 m128 xmm +// Construct and append a VPCMPESTRI instruction to the active function. +func (c *Context) VPCMPESTRI(i, mx, x operand.Op) { + if inst, err := x86.VPCMPESTRI(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index. +// +// Forms: +// +// VPCMPESTRI imm8 xmm xmm +// VPCMPESTRI imm8 m128 xmm +// Construct and append a VPCMPESTRI instruction to the active function. +// Operates on the global context. +func VPCMPESTRI(i, mx, x operand.Op) { ctx.VPCMPESTRI(i, mx, x) } + +// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask. +// +// Forms: +// +// VPCMPESTRM imm8 xmm xmm +// VPCMPESTRM imm8 m128 xmm +// Construct and append a VPCMPESTRM instruction to the active function. +func (c *Context) VPCMPESTRM(i, mx, x operand.Op) { + if inst, err := x86.VPCMPESTRM(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask. +// +// Forms: +// +// VPCMPESTRM imm8 xmm xmm +// VPCMPESTRM imm8 m128 xmm +// Construct and append a VPCMPESTRM instruction to the active function. +// Operates on the global context. +func VPCMPESTRM(i, mx, x operand.Op) { ctx.VPCMPESTRM(i, mx, x) } + +// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than. +// +// Forms: +// +// VPCMPGTB xmm xmm xmm +// VPCMPGTB m128 xmm xmm +// VPCMPGTB ymm ymm ymm +// VPCMPGTB m256 ymm ymm +// Construct and append a VPCMPGTB instruction to the active function. +func (c *Context) VPCMPGTB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPGTB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than. 
+// +// Forms: +// +// VPCMPGTB xmm xmm xmm +// VPCMPGTB m128 xmm xmm +// VPCMPGTB ymm ymm ymm +// VPCMPGTB m256 ymm ymm +// Construct and append a VPCMPGTB instruction to the active function. +// Operates on the global context. +func VPCMPGTB(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTB(mxy, xy, xy1) } + +// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than. +// +// Forms: +// +// VPCMPGTD xmm xmm xmm +// VPCMPGTD m128 xmm xmm +// VPCMPGTD ymm ymm ymm +// VPCMPGTD m256 ymm ymm +// Construct and append a VPCMPGTD instruction to the active function. +func (c *Context) VPCMPGTD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPGTD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than. +// +// Forms: +// +// VPCMPGTD xmm xmm xmm +// VPCMPGTD m128 xmm xmm +// VPCMPGTD ymm ymm ymm +// VPCMPGTD m256 ymm ymm +// Construct and append a VPCMPGTD instruction to the active function. +// Operates on the global context. +func VPCMPGTD(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTD(mxy, xy, xy1) } + +// VPCMPGTQ: Compare Packed Data for Greater Than. +// +// Forms: +// +// VPCMPGTQ xmm xmm xmm +// VPCMPGTQ m128 xmm xmm +// VPCMPGTQ ymm ymm ymm +// VPCMPGTQ m256 ymm ymm +// Construct and append a VPCMPGTQ instruction to the active function. +func (c *Context) VPCMPGTQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPGTQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPGTQ: Compare Packed Data for Greater Than. +// +// Forms: +// +// VPCMPGTQ xmm xmm xmm +// VPCMPGTQ m128 xmm xmm +// VPCMPGTQ ymm ymm ymm +// VPCMPGTQ m256 ymm ymm +// Construct and append a VPCMPGTQ instruction to the active function. +// Operates on the global context. +func VPCMPGTQ(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTQ(mxy, xy, xy1) } + +// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than. +// +// Forms: +// +// VPCMPGTW xmm xmm xmm +// VPCMPGTW m128 xmm xmm +// VPCMPGTW ymm ymm ymm +// VPCMPGTW m256 ymm ymm +// Construct and append a VPCMPGTW instruction to the active function. +func (c *Context) VPCMPGTW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPCMPGTW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than. +// +// Forms: +// +// VPCMPGTW xmm xmm xmm +// VPCMPGTW m128 xmm xmm +// VPCMPGTW ymm ymm ymm +// VPCMPGTW m256 ymm ymm +// Construct and append a VPCMPGTW instruction to the active function. +// Operates on the global context. +func VPCMPGTW(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTW(mxy, xy, xy1) } + +// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index. +// +// Forms: +// +// VPCMPISTRI imm8 xmm xmm +// VPCMPISTRI imm8 m128 xmm +// Construct and append a VPCMPISTRI instruction to the active function. +func (c *Context) VPCMPISTRI(i, mx, x operand.Op) { + if inst, err := x86.VPCMPISTRI(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index. +// +// Forms: +// +// VPCMPISTRI imm8 xmm xmm +// VPCMPISTRI imm8 m128 xmm +// Construct and append a VPCMPISTRI instruction to the active function. +// Operates on the global context. +func VPCMPISTRI(i, mx, x operand.Op) { ctx.VPCMPISTRI(i, mx, x) } + +// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask. 
+// +// Forms: +// +// VPCMPISTRM imm8 xmm xmm +// VPCMPISTRM imm8 m128 xmm +// Construct and append a VPCMPISTRM instruction to the active function. +func (c *Context) VPCMPISTRM(i, mx, x operand.Op) { + if inst, err := x86.VPCMPISTRM(i, mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask. +// +// Forms: +// +// VPCMPISTRM imm8 xmm xmm +// VPCMPISTRM imm8 m128 xmm +// Construct and append a VPCMPISTRM instruction to the active function. +// Operates on the global context. +func VPCMPISTRM(i, mx, x operand.Op) { ctx.VPCMPISTRM(i, mx, x) } + +// VPERM2F128: Permute Floating-Point Values. +// +// Forms: +// +// VPERM2F128 imm8 ymm ymm ymm +// VPERM2F128 imm8 m256 ymm ymm +// Construct and append a VPERM2F128 instruction to the active function. +func (c *Context) VPERM2F128(i, my, y, y1 operand.Op) { + if inst, err := x86.VPERM2F128(i, my, y, y1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERM2F128: Permute Floating-Point Values. +// +// Forms: +// +// VPERM2F128 imm8 ymm ymm ymm +// VPERM2F128 imm8 m256 ymm ymm +// Construct and append a VPERM2F128 instruction to the active function. +// Operates on the global context. +func VPERM2F128(i, my, y, y1 operand.Op) { ctx.VPERM2F128(i, my, y, y1) } + +// VPERM2I128: Permute 128-Bit Integer Values. +// +// Forms: +// +// VPERM2I128 imm8 ymm ymm ymm +// VPERM2I128 imm8 m256 ymm ymm +// Construct and append a VPERM2I128 instruction to the active function. +func (c *Context) VPERM2I128(i, my, y, y1 operand.Op) { + if inst, err := x86.VPERM2I128(i, my, y, y1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERM2I128: Permute 128-Bit Integer Values. +// +// Forms: +// +// VPERM2I128 imm8 ymm ymm ymm +// VPERM2I128 imm8 m256 ymm ymm +// Construct and append a VPERM2I128 instruction to the active function. +// Operates on the global context. +func VPERM2I128(i, my, y, y1 operand.Op) { ctx.VPERM2I128(i, my, y, y1) } + +// VPERMD: Permute Doubleword Integers. +// +// Forms: +// +// VPERMD ymm ymm ymm +// VPERMD m256 ymm ymm +// Construct and append a VPERMD instruction to the active function. +func (c *Context) VPERMD(my, y, y1 operand.Op) { + if inst, err := x86.VPERMD(my, y, y1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERMD: Permute Doubleword Integers. +// +// Forms: +// +// VPERMD ymm ymm ymm +// VPERMD m256 ymm ymm +// Construct and append a VPERMD instruction to the active function. +// Operates on the global context. +func VPERMD(my, y, y1 operand.Op) { ctx.VPERMD(my, y, y1) } + +// VPERMILPD: Permute Double-Precision Floating-Point Values. +// +// Forms: +// +// VPERMILPD imm8 xmm xmm +// VPERMILPD xmm xmm xmm +// VPERMILPD m128 xmm xmm +// VPERMILPD imm8 m128 xmm +// VPERMILPD imm8 ymm ymm +// VPERMILPD ymm ymm ymm +// VPERMILPD m256 ymm ymm +// VPERMILPD imm8 m256 ymm +// Construct and append a VPERMILPD instruction to the active function. +func (c *Context) VPERMILPD(imxy, mxy, xy operand.Op) { + if inst, err := x86.VPERMILPD(imxy, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERMILPD: Permute Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VPERMILPD imm8 xmm xmm +// VPERMILPD xmm xmm xmm +// VPERMILPD m128 xmm xmm +// VPERMILPD imm8 m128 xmm +// VPERMILPD imm8 ymm ymm +// VPERMILPD ymm ymm ymm +// VPERMILPD m256 ymm ymm +// VPERMILPD imm8 m256 ymm +// Construct and append a VPERMILPD instruction to the active function. +// Operates on the global context. +func VPERMILPD(imxy, mxy, xy operand.Op) { ctx.VPERMILPD(imxy, mxy, xy) } + +// VPERMILPS: Permute Single-Precision Floating-Point Values. +// +// Forms: +// +// VPERMILPS imm8 xmm xmm +// VPERMILPS xmm xmm xmm +// VPERMILPS m128 xmm xmm +// VPERMILPS imm8 m128 xmm +// VPERMILPS imm8 ymm ymm +// VPERMILPS ymm ymm ymm +// VPERMILPS m256 ymm ymm +// VPERMILPS imm8 m256 ymm +// Construct and append a VPERMILPS instruction to the active function. +func (c *Context) VPERMILPS(imxy, mxy, xy operand.Op) { + if inst, err := x86.VPERMILPS(imxy, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERMILPS: Permute Single-Precision Floating-Point Values. +// +// Forms: +// +// VPERMILPS imm8 xmm xmm +// VPERMILPS xmm xmm xmm +// VPERMILPS m128 xmm xmm +// VPERMILPS imm8 m128 xmm +// VPERMILPS imm8 ymm ymm +// VPERMILPS ymm ymm ymm +// VPERMILPS m256 ymm ymm +// VPERMILPS imm8 m256 ymm +// Construct and append a VPERMILPS instruction to the active function. +// Operates on the global context. +func VPERMILPS(imxy, mxy, xy operand.Op) { ctx.VPERMILPS(imxy, mxy, xy) } + +// VPERMPD: Permute Double-Precision Floating-Point Elements. +// +// Forms: +// +// VPERMPD imm8 ymm ymm +// VPERMPD imm8 m256 ymm +// Construct and append a VPERMPD instruction to the active function. +func (c *Context) VPERMPD(i, my, y operand.Op) { + if inst, err := x86.VPERMPD(i, my, y); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERMPD: Permute Double-Precision Floating-Point Elements. +// +// Forms: +// +// VPERMPD imm8 ymm ymm +// VPERMPD imm8 m256 ymm +// Construct and append a VPERMPD instruction to the active function. +// Operates on the global context. +func VPERMPD(i, my, y operand.Op) { ctx.VPERMPD(i, my, y) } + +// VPERMPS: Permute Single-Precision Floating-Point Elements. +// +// Forms: +// +// VPERMPS ymm ymm ymm +// VPERMPS m256 ymm ymm +// Construct and append a VPERMPS instruction to the active function. +func (c *Context) VPERMPS(my, y, y1 operand.Op) { + if inst, err := x86.VPERMPS(my, y, y1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERMPS: Permute Single-Precision Floating-Point Elements. +// +// Forms: +// +// VPERMPS ymm ymm ymm +// VPERMPS m256 ymm ymm +// Construct and append a VPERMPS instruction to the active function. +// Operates on the global context. +func VPERMPS(my, y, y1 operand.Op) { ctx.VPERMPS(my, y, y1) } + +// VPERMQ: Permute Quadword Integers. +// +// Forms: +// +// VPERMQ imm8 ymm ymm +// VPERMQ imm8 m256 ymm +// Construct and append a VPERMQ instruction to the active function. +func (c *Context) VPERMQ(i, my, y operand.Op) { + if inst, err := x86.VPERMQ(i, my, y); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPERMQ: Permute Quadword Integers. +// +// Forms: +// +// VPERMQ imm8 ymm ymm +// VPERMQ imm8 m256 ymm +// Construct and append a VPERMQ instruction to the active function. +// Operates on the global context. +func VPERMQ(i, my, y operand.Op) { ctx.VPERMQ(i, my, y) } + +// VPEXTRB: Extract Byte. 
+// +// Forms: +// +// VPEXTRB imm8 xmm r32 +// VPEXTRB imm8 xmm m8 +// Construct and append a VPEXTRB instruction to the active function. +func (c *Context) VPEXTRB(i, x, mr operand.Op) { + if inst, err := x86.VPEXTRB(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPEXTRB: Extract Byte. +// +// Forms: +// +// VPEXTRB imm8 xmm r32 +// VPEXTRB imm8 xmm m8 +// Construct and append a VPEXTRB instruction to the active function. +// Operates on the global context. +func VPEXTRB(i, x, mr operand.Op) { ctx.VPEXTRB(i, x, mr) } + +// VPEXTRD: Extract Doubleword. +// +// Forms: +// +// VPEXTRD imm8 xmm r32 +// VPEXTRD imm8 xmm m32 +// Construct and append a VPEXTRD instruction to the active function. +func (c *Context) VPEXTRD(i, x, mr operand.Op) { + if inst, err := x86.VPEXTRD(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPEXTRD: Extract Doubleword. +// +// Forms: +// +// VPEXTRD imm8 xmm r32 +// VPEXTRD imm8 xmm m32 +// Construct and append a VPEXTRD instruction to the active function. +// Operates on the global context. +func VPEXTRD(i, x, mr operand.Op) { ctx.VPEXTRD(i, x, mr) } + +// VPEXTRQ: Extract Quadword. +// +// Forms: +// +// VPEXTRQ imm8 xmm r64 +// VPEXTRQ imm8 xmm m64 +// Construct and append a VPEXTRQ instruction to the active function. +func (c *Context) VPEXTRQ(i, x, mr operand.Op) { + if inst, err := x86.VPEXTRQ(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPEXTRQ: Extract Quadword. +// +// Forms: +// +// VPEXTRQ imm8 xmm r64 +// VPEXTRQ imm8 xmm m64 +// Construct and append a VPEXTRQ instruction to the active function. +// Operates on the global context. +func VPEXTRQ(i, x, mr operand.Op) { ctx.VPEXTRQ(i, x, mr) } + +// VPEXTRW: Extract Word. +// +// Forms: +// +// VPEXTRW imm8 xmm r32 +// VPEXTRW imm8 xmm m16 +// Construct and append a VPEXTRW instruction to the active function. +func (c *Context) VPEXTRW(i, x, mr operand.Op) { + if inst, err := x86.VPEXTRW(i, x, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPEXTRW: Extract Word. +// +// Forms: +// +// VPEXTRW imm8 xmm r32 +// VPEXTRW imm8 xmm m16 +// Construct and append a VPEXTRW instruction to the active function. +// Operates on the global context. +func VPEXTRW(i, x, mr operand.Op) { ctx.VPEXTRW(i, x, mr) } + +// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices. +// +// Forms: +// +// VPGATHERDD xmm vm32x xmm +// VPGATHERDD ymm vm32y ymm +// Construct and append a VPGATHERDD instruction to the active function. +func (c *Context) VPGATHERDD(xy, v, xy1 operand.Op) { + if inst, err := x86.VPGATHERDD(xy, v, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices. +// +// Forms: +// +// VPGATHERDD xmm vm32x xmm +// VPGATHERDD ymm vm32y ymm +// Construct and append a VPGATHERDD instruction to the active function. +// Operates on the global context. +func VPGATHERDD(xy, v, xy1 operand.Op) { ctx.VPGATHERDD(xy, v, xy1) } + +// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices. +// +// Forms: +// +// VPGATHERDQ xmm vm32x xmm +// VPGATHERDQ ymm vm32x ymm +// Construct and append a VPGATHERDQ instruction to the active function. 
+func (c *Context) VPGATHERDQ(xy, v, xy1 operand.Op) { + if inst, err := x86.VPGATHERDQ(xy, v, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices. +// +// Forms: +// +// VPGATHERDQ xmm vm32x xmm +// VPGATHERDQ ymm vm32x ymm +// Construct and append a VPGATHERDQ instruction to the active function. +// Operates on the global context. +func VPGATHERDQ(xy, v, xy1 operand.Op) { ctx.VPGATHERDQ(xy, v, xy1) } + +// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices. +// +// Forms: +// +// VPGATHERQD xmm vm64x xmm +// VPGATHERQD xmm vm64y xmm +// Construct and append a VPGATHERQD instruction to the active function. +func (c *Context) VPGATHERQD(x, v, x1 operand.Op) { + if inst, err := x86.VPGATHERQD(x, v, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices. +// +// Forms: +// +// VPGATHERQD xmm vm64x xmm +// VPGATHERQD xmm vm64y xmm +// Construct and append a VPGATHERQD instruction to the active function. +// Operates on the global context. +func VPGATHERQD(x, v, x1 operand.Op) { ctx.VPGATHERQD(x, v, x1) } + +// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices. +// +// Forms: +// +// VPGATHERQQ xmm vm64x xmm +// VPGATHERQQ ymm vm64y ymm +// Construct and append a VPGATHERQQ instruction to the active function. +func (c *Context) VPGATHERQQ(xy, v, xy1 operand.Op) { + if inst, err := x86.VPGATHERQQ(xy, v, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices. +// +// Forms: +// +// VPGATHERQQ xmm vm64x xmm +// VPGATHERQQ ymm vm64y ymm +// Construct and append a VPGATHERQQ instruction to the active function. +// Operates on the global context. +func VPGATHERQQ(xy, v, xy1 operand.Op) { ctx.VPGATHERQQ(xy, v, xy1) } + +// VPHADDD: Packed Horizontal Add Doubleword Integer. +// +// Forms: +// +// VPHADDD xmm xmm xmm +// VPHADDD m128 xmm xmm +// VPHADDD ymm ymm ymm +// VPHADDD m256 ymm ymm +// Construct and append a VPHADDD instruction to the active function. +func (c *Context) VPHADDD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPHADDD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHADDD: Packed Horizontal Add Doubleword Integer. +// +// Forms: +// +// VPHADDD xmm xmm xmm +// VPHADDD m128 xmm xmm +// VPHADDD ymm ymm ymm +// VPHADDD m256 ymm ymm +// Construct and append a VPHADDD instruction to the active function. +// Operates on the global context. +func VPHADDD(mxy, xy, xy1 operand.Op) { ctx.VPHADDD(mxy, xy, xy1) } + +// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPHADDSW xmm xmm xmm +// VPHADDSW m128 xmm xmm +// VPHADDSW ymm ymm ymm +// VPHADDSW m256 ymm ymm +// Construct and append a VPHADDSW instruction to the active function. +func (c *Context) VPHADDSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPHADDSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPHADDSW xmm xmm xmm +// VPHADDSW m128 xmm xmm +// VPHADDSW ymm ymm ymm +// VPHADDSW m256 ymm ymm +// Construct and append a VPHADDSW instruction to the active function. +// Operates on the global context. 
+func VPHADDSW(mxy, xy, xy1 operand.Op) { ctx.VPHADDSW(mxy, xy, xy1) } + +// VPHADDW: Packed Horizontal Add Word Integers. +// +// Forms: +// +// VPHADDW xmm xmm xmm +// VPHADDW m128 xmm xmm +// VPHADDW ymm ymm ymm +// VPHADDW m256 ymm ymm +// Construct and append a VPHADDW instruction to the active function. +func (c *Context) VPHADDW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPHADDW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHADDW: Packed Horizontal Add Word Integers. +// +// Forms: +// +// VPHADDW xmm xmm xmm +// VPHADDW m128 xmm xmm +// VPHADDW ymm ymm ymm +// VPHADDW m256 ymm ymm +// Construct and append a VPHADDW instruction to the active function. +// Operates on the global context. +func VPHADDW(mxy, xy, xy1 operand.Op) { ctx.VPHADDW(mxy, xy, xy1) } + +// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers. +// +// Forms: +// +// VPHMINPOSUW xmm xmm +// VPHMINPOSUW m128 xmm +// Construct and append a VPHMINPOSUW instruction to the active function. +func (c *Context) VPHMINPOSUW(mx, x operand.Op) { + if inst, err := x86.VPHMINPOSUW(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers. +// +// Forms: +// +// VPHMINPOSUW xmm xmm +// VPHMINPOSUW m128 xmm +// Construct and append a VPHMINPOSUW instruction to the active function. +// Operates on the global context. +func VPHMINPOSUW(mx, x operand.Op) { ctx.VPHMINPOSUW(mx, x) } + +// VPHSUBD: Packed Horizontal Subtract Doubleword Integers. +// +// Forms: +// +// VPHSUBD xmm xmm xmm +// VPHSUBD m128 xmm xmm +// VPHSUBD ymm ymm ymm +// VPHSUBD m256 ymm ymm +// Construct and append a VPHSUBD instruction to the active function. +func (c *Context) VPHSUBD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPHSUBD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHSUBD: Packed Horizontal Subtract Doubleword Integers. +// +// Forms: +// +// VPHSUBD xmm xmm xmm +// VPHSUBD m128 xmm xmm +// VPHSUBD ymm ymm ymm +// VPHSUBD m256 ymm ymm +// Construct and append a VPHSUBD instruction to the active function. +// Operates on the global context. +func VPHSUBD(mxy, xy, xy1 operand.Op) { ctx.VPHSUBD(mxy, xy, xy1) } + +// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPHSUBSW xmm xmm xmm +// VPHSUBSW m128 xmm xmm +// VPHSUBSW ymm ymm ymm +// VPHSUBSW m256 ymm ymm +// Construct and append a VPHSUBSW instruction to the active function. +func (c *Context) VPHSUBSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPHSUBSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPHSUBSW xmm xmm xmm +// VPHSUBSW m128 xmm xmm +// VPHSUBSW ymm ymm ymm +// VPHSUBSW m256 ymm ymm +// Construct and append a VPHSUBSW instruction to the active function. +// Operates on the global context. +func VPHSUBSW(mxy, xy, xy1 operand.Op) { ctx.VPHSUBSW(mxy, xy, xy1) } + +// VPHSUBW: Packed Horizontal Subtract Word Integers. +// +// Forms: +// +// VPHSUBW xmm xmm xmm +// VPHSUBW m128 xmm xmm +// VPHSUBW ymm ymm ymm +// VPHSUBW m256 ymm ymm +// Construct and append a VPHSUBW instruction to the active function. 
+func (c *Context) VPHSUBW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPHSUBW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPHSUBW: Packed Horizontal Subtract Word Integers. +// +// Forms: +// +// VPHSUBW xmm xmm xmm +// VPHSUBW m128 xmm xmm +// VPHSUBW ymm ymm ymm +// VPHSUBW m256 ymm ymm +// Construct and append a VPHSUBW instruction to the active function. +// Operates on the global context. +func VPHSUBW(mxy, xy, xy1 operand.Op) { ctx.VPHSUBW(mxy, xy, xy1) } + +// VPINSRB: Insert Byte. +// +// Forms: +// +// VPINSRB imm8 r32 xmm xmm +// VPINSRB imm8 m8 xmm xmm +// Construct and append a VPINSRB instruction to the active function. +func (c *Context) VPINSRB(i, mr, x, x1 operand.Op) { + if inst, err := x86.VPINSRB(i, mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPINSRB: Insert Byte. +// +// Forms: +// +// VPINSRB imm8 r32 xmm xmm +// VPINSRB imm8 m8 xmm xmm +// Construct and append a VPINSRB instruction to the active function. +// Operates on the global context. +func VPINSRB(i, mr, x, x1 operand.Op) { ctx.VPINSRB(i, mr, x, x1) } + +// VPINSRD: Insert Doubleword. +// +// Forms: +// +// VPINSRD imm8 r32 xmm xmm +// VPINSRD imm8 m32 xmm xmm +// Construct and append a VPINSRD instruction to the active function. +func (c *Context) VPINSRD(i, mr, x, x1 operand.Op) { + if inst, err := x86.VPINSRD(i, mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPINSRD: Insert Doubleword. +// +// Forms: +// +// VPINSRD imm8 r32 xmm xmm +// VPINSRD imm8 m32 xmm xmm +// Construct and append a VPINSRD instruction to the active function. +// Operates on the global context. +func VPINSRD(i, mr, x, x1 operand.Op) { ctx.VPINSRD(i, mr, x, x1) } + +// VPINSRQ: Insert Quadword. +// +// Forms: +// +// VPINSRQ imm8 r64 xmm xmm +// VPINSRQ imm8 m64 xmm xmm +// Construct and append a VPINSRQ instruction to the active function. +func (c *Context) VPINSRQ(i, mr, x, x1 operand.Op) { + if inst, err := x86.VPINSRQ(i, mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPINSRQ: Insert Quadword. +// +// Forms: +// +// VPINSRQ imm8 r64 xmm xmm +// VPINSRQ imm8 m64 xmm xmm +// Construct and append a VPINSRQ instruction to the active function. +// Operates on the global context. +func VPINSRQ(i, mr, x, x1 operand.Op) { ctx.VPINSRQ(i, mr, x, x1) } + +// VPINSRW: Insert Word. +// +// Forms: +// +// VPINSRW imm8 r32 xmm xmm +// VPINSRW imm8 m16 xmm xmm +// Construct and append a VPINSRW instruction to the active function. +func (c *Context) VPINSRW(i, mr, x, x1 operand.Op) { + if inst, err := x86.VPINSRW(i, mr, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPINSRW: Insert Word. +// +// Forms: +// +// VPINSRW imm8 r32 xmm xmm +// VPINSRW imm8 m16 xmm xmm +// Construct and append a VPINSRW instruction to the active function. +// Operates on the global context. +func VPINSRW(i, mr, x, x1 operand.Op) { ctx.VPINSRW(i, mr, x, x1) } + +// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers. +// +// Forms: +// +// VPMADDUBSW xmm xmm xmm +// VPMADDUBSW m128 xmm xmm +// VPMADDUBSW ymm ymm ymm +// VPMADDUBSW m256 ymm ymm +// Construct and append a VPMADDUBSW instruction to the active function. 
+func (c *Context) VPMADDUBSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMADDUBSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers. +// +// Forms: +// +// VPMADDUBSW xmm xmm xmm +// VPMADDUBSW m128 xmm xmm +// VPMADDUBSW ymm ymm ymm +// VPMADDUBSW m256 ymm ymm +// Construct and append a VPMADDUBSW instruction to the active function. +// Operates on the global context. +func VPMADDUBSW(mxy, xy, xy1 operand.Op) { ctx.VPMADDUBSW(mxy, xy, xy1) } + +// VPMADDWD: Multiply and Add Packed Signed Word Integers. +// +// Forms: +// +// VPMADDWD xmm xmm xmm +// VPMADDWD m128 xmm xmm +// VPMADDWD ymm ymm ymm +// VPMADDWD m256 ymm ymm +// Construct and append a VPMADDWD instruction to the active function. +func (c *Context) VPMADDWD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMADDWD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMADDWD: Multiply and Add Packed Signed Word Integers. +// +// Forms: +// +// VPMADDWD xmm xmm xmm +// VPMADDWD m128 xmm xmm +// VPMADDWD ymm ymm ymm +// VPMADDWD m256 ymm ymm +// Construct and append a VPMADDWD instruction to the active function. +// Operates on the global context. +func VPMADDWD(mxy, xy, xy1 operand.Op) { ctx.VPMADDWD(mxy, xy, xy1) } + +// VPMASKMOVD: Conditional Move Packed Doubleword Integers. +// +// Forms: +// +// VPMASKMOVD m128 xmm xmm +// VPMASKMOVD m256 ymm ymm +// VPMASKMOVD xmm xmm m128 +// VPMASKMOVD ymm ymm m256 +// Construct and append a VPMASKMOVD instruction to the active function. +func (c *Context) VPMASKMOVD(mxy, xy, mxy1 operand.Op) { + if inst, err := x86.VPMASKMOVD(mxy, xy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMASKMOVD: Conditional Move Packed Doubleword Integers. +// +// Forms: +// +// VPMASKMOVD m128 xmm xmm +// VPMASKMOVD m256 ymm ymm +// VPMASKMOVD xmm xmm m128 +// VPMASKMOVD ymm ymm m256 +// Construct and append a VPMASKMOVD instruction to the active function. +// Operates on the global context. +func VPMASKMOVD(mxy, xy, mxy1 operand.Op) { ctx.VPMASKMOVD(mxy, xy, mxy1) } + +// VPMASKMOVQ: Conditional Move Packed Quadword Integers. +// +// Forms: +// +// VPMASKMOVQ m128 xmm xmm +// VPMASKMOVQ m256 ymm ymm +// VPMASKMOVQ xmm xmm m128 +// VPMASKMOVQ ymm ymm m256 +// Construct and append a VPMASKMOVQ instruction to the active function. +func (c *Context) VPMASKMOVQ(mxy, xy, mxy1 operand.Op) { + if inst, err := x86.VPMASKMOVQ(mxy, xy, mxy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMASKMOVQ: Conditional Move Packed Quadword Integers. +// +// Forms: +// +// VPMASKMOVQ m128 xmm xmm +// VPMASKMOVQ m256 ymm ymm +// VPMASKMOVQ xmm xmm m128 +// VPMASKMOVQ ymm ymm m256 +// Construct and append a VPMASKMOVQ instruction to the active function. +// Operates on the global context. +func VPMASKMOVQ(mxy, xy, mxy1 operand.Op) { ctx.VPMASKMOVQ(mxy, xy, mxy1) } + +// VPMAXSB: Maximum of Packed Signed Byte Integers. +// +// Forms: +// +// VPMAXSB xmm xmm xmm +// VPMAXSB m128 xmm xmm +// VPMAXSB ymm ymm ymm +// VPMAXSB m256 ymm ymm +// Construct and append a VPMAXSB instruction to the active function. +func (c *Context) VPMAXSB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMAXSB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMAXSB: Maximum of Packed Signed Byte Integers. 
+// +// Forms: +// +// VPMAXSB xmm xmm xmm +// VPMAXSB m128 xmm xmm +// VPMAXSB ymm ymm ymm +// VPMAXSB m256 ymm ymm +// Construct and append a VPMAXSB instruction to the active function. +// Operates on the global context. +func VPMAXSB(mxy, xy, xy1 operand.Op) { ctx.VPMAXSB(mxy, xy, xy1) } + +// VPMAXSD: Maximum of Packed Signed Doubleword Integers. +// +// Forms: +// +// VPMAXSD xmm xmm xmm +// VPMAXSD m128 xmm xmm +// VPMAXSD ymm ymm ymm +// VPMAXSD m256 ymm ymm +// Construct and append a VPMAXSD instruction to the active function. +func (c *Context) VPMAXSD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMAXSD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMAXSD: Maximum of Packed Signed Doubleword Integers. +// +// Forms: +// +// VPMAXSD xmm xmm xmm +// VPMAXSD m128 xmm xmm +// VPMAXSD ymm ymm ymm +// VPMAXSD m256 ymm ymm +// Construct and append a VPMAXSD instruction to the active function. +// Operates on the global context. +func VPMAXSD(mxy, xy, xy1 operand.Op) { ctx.VPMAXSD(mxy, xy, xy1) } + +// VPMAXSW: Maximum of Packed Signed Word Integers. +// +// Forms: +// +// VPMAXSW xmm xmm xmm +// VPMAXSW m128 xmm xmm +// VPMAXSW ymm ymm ymm +// VPMAXSW m256 ymm ymm +// Construct and append a VPMAXSW instruction to the active function. +func (c *Context) VPMAXSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMAXSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMAXSW: Maximum of Packed Signed Word Integers. +// +// Forms: +// +// VPMAXSW xmm xmm xmm +// VPMAXSW m128 xmm xmm +// VPMAXSW ymm ymm ymm +// VPMAXSW m256 ymm ymm +// Construct and append a VPMAXSW instruction to the active function. +// Operates on the global context. +func VPMAXSW(mxy, xy, xy1 operand.Op) { ctx.VPMAXSW(mxy, xy, xy1) } + +// VPMAXUB: Maximum of Packed Unsigned Byte Integers. +// +// Forms: +// +// VPMAXUB xmm xmm xmm +// VPMAXUB m128 xmm xmm +// VPMAXUB ymm ymm ymm +// VPMAXUB m256 ymm ymm +// Construct and append a VPMAXUB instruction to the active function. +func (c *Context) VPMAXUB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMAXUB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMAXUB: Maximum of Packed Unsigned Byte Integers. +// +// Forms: +// +// VPMAXUB xmm xmm xmm +// VPMAXUB m128 xmm xmm +// VPMAXUB ymm ymm ymm +// VPMAXUB m256 ymm ymm +// Construct and append a VPMAXUB instruction to the active function. +// Operates on the global context. +func VPMAXUB(mxy, xy, xy1 operand.Op) { ctx.VPMAXUB(mxy, xy, xy1) } + +// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMAXUD xmm xmm xmm +// VPMAXUD m128 xmm xmm +// VPMAXUD ymm ymm ymm +// VPMAXUD m256 ymm ymm +// Construct and append a VPMAXUD instruction to the active function. +func (c *Context) VPMAXUD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMAXUD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMAXUD xmm xmm xmm +// VPMAXUD m128 xmm xmm +// VPMAXUD ymm ymm ymm +// VPMAXUD m256 ymm ymm +// Construct and append a VPMAXUD instruction to the active function. +// Operates on the global context. +func VPMAXUD(mxy, xy, xy1 operand.Op) { ctx.VPMAXUD(mxy, xy, xy1) } + +// VPMAXUW: Maximum of Packed Unsigned Word Integers. 
+// +// Forms: +// +// VPMAXUW xmm xmm xmm +// VPMAXUW m128 xmm xmm +// VPMAXUW ymm ymm ymm +// VPMAXUW m256 ymm ymm +// Construct and append a VPMAXUW instruction to the active function. +func (c *Context) VPMAXUW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMAXUW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMAXUW: Maximum of Packed Unsigned Word Integers. +// +// Forms: +// +// VPMAXUW xmm xmm xmm +// VPMAXUW m128 xmm xmm +// VPMAXUW ymm ymm ymm +// VPMAXUW m256 ymm ymm +// Construct and append a VPMAXUW instruction to the active function. +// Operates on the global context. +func VPMAXUW(mxy, xy, xy1 operand.Op) { ctx.VPMAXUW(mxy, xy, xy1) } + +// VPMINSB: Minimum of Packed Signed Byte Integers. +// +// Forms: +// +// VPMINSB xmm xmm xmm +// VPMINSB m128 xmm xmm +// VPMINSB ymm ymm ymm +// VPMINSB m256 ymm ymm +// Construct and append a VPMINSB instruction to the active function. +func (c *Context) VPMINSB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMINSB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMINSB: Minimum of Packed Signed Byte Integers. +// +// Forms: +// +// VPMINSB xmm xmm xmm +// VPMINSB m128 xmm xmm +// VPMINSB ymm ymm ymm +// VPMINSB m256 ymm ymm +// Construct and append a VPMINSB instruction to the active function. +// Operates on the global context. +func VPMINSB(mxy, xy, xy1 operand.Op) { ctx.VPMINSB(mxy, xy, xy1) } + +// VPMINSD: Minimum of Packed Signed Doubleword Integers. +// +// Forms: +// +// VPMINSD xmm xmm xmm +// VPMINSD m128 xmm xmm +// VPMINSD ymm ymm ymm +// VPMINSD m256 ymm ymm +// Construct and append a VPMINSD instruction to the active function. +func (c *Context) VPMINSD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMINSD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMINSD: Minimum of Packed Signed Doubleword Integers. +// +// Forms: +// +// VPMINSD xmm xmm xmm +// VPMINSD m128 xmm xmm +// VPMINSD ymm ymm ymm +// VPMINSD m256 ymm ymm +// Construct and append a VPMINSD instruction to the active function. +// Operates on the global context. +func VPMINSD(mxy, xy, xy1 operand.Op) { ctx.VPMINSD(mxy, xy, xy1) } + +// VPMINSW: Minimum of Packed Signed Word Integers. +// +// Forms: +// +// VPMINSW xmm xmm xmm +// VPMINSW m128 xmm xmm +// VPMINSW ymm ymm ymm +// VPMINSW m256 ymm ymm +// Construct and append a VPMINSW instruction to the active function. +func (c *Context) VPMINSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMINSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMINSW: Minimum of Packed Signed Word Integers. +// +// Forms: +// +// VPMINSW xmm xmm xmm +// VPMINSW m128 xmm xmm +// VPMINSW ymm ymm ymm +// VPMINSW m256 ymm ymm +// Construct and append a VPMINSW instruction to the active function. +// Operates on the global context. +func VPMINSW(mxy, xy, xy1 operand.Op) { ctx.VPMINSW(mxy, xy, xy1) } + +// VPMINUB: Minimum of Packed Unsigned Byte Integers. +// +// Forms: +// +// VPMINUB xmm xmm xmm +// VPMINUB m128 xmm xmm +// VPMINUB ymm ymm ymm +// VPMINUB m256 ymm ymm +// Construct and append a VPMINUB instruction to the active function. +func (c *Context) VPMINUB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMINUB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMINUB: Minimum of Packed Unsigned Byte Integers. 
+// +// Forms: +// +// VPMINUB xmm xmm xmm +// VPMINUB m128 xmm xmm +// VPMINUB ymm ymm ymm +// VPMINUB m256 ymm ymm +// Construct and append a VPMINUB instruction to the active function. +// Operates on the global context. +func VPMINUB(mxy, xy, xy1 operand.Op) { ctx.VPMINUB(mxy, xy, xy1) } + +// VPMINUD: Minimum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMINUD xmm xmm xmm +// VPMINUD m128 xmm xmm +// VPMINUD ymm ymm ymm +// VPMINUD m256 ymm ymm +// Construct and append a VPMINUD instruction to the active function. +func (c *Context) VPMINUD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMINUD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMINUD: Minimum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMINUD xmm xmm xmm +// VPMINUD m128 xmm xmm +// VPMINUD ymm ymm ymm +// VPMINUD m256 ymm ymm +// Construct and append a VPMINUD instruction to the active function. +// Operates on the global context. +func VPMINUD(mxy, xy, xy1 operand.Op) { ctx.VPMINUD(mxy, xy, xy1) } + +// VPMINUW: Minimum of Packed Unsigned Word Integers. +// +// Forms: +// +// VPMINUW xmm xmm xmm +// VPMINUW m128 xmm xmm +// VPMINUW ymm ymm ymm +// VPMINUW m256 ymm ymm +// Construct and append a VPMINUW instruction to the active function. +func (c *Context) VPMINUW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMINUW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMINUW: Minimum of Packed Unsigned Word Integers. +// +// Forms: +// +// VPMINUW xmm xmm xmm +// VPMINUW m128 xmm xmm +// VPMINUW ymm ymm ymm +// VPMINUW m256 ymm ymm +// Construct and append a VPMINUW instruction to the active function. +// Operates on the global context. +func VPMINUW(mxy, xy, xy1 operand.Op) { ctx.VPMINUW(mxy, xy, xy1) } + +// VPMOVMSKB: Move Byte Mask. +// +// Forms: +// +// VPMOVMSKB xmm r32 +// VPMOVMSKB ymm r32 +// Construct and append a VPMOVMSKB instruction to the active function. +func (c *Context) VPMOVMSKB(xy, r operand.Op) { + if inst, err := x86.VPMOVMSKB(xy, r); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVMSKB: Move Byte Mask. +// +// Forms: +// +// VPMOVMSKB xmm r32 +// VPMOVMSKB ymm r32 +// Construct and append a VPMOVMSKB instruction to the active function. +// Operates on the global context. +func VPMOVMSKB(xy, r operand.Op) { ctx.VPMOVMSKB(xy, r) } + +// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBD xmm xmm +// VPMOVSXBD m32 xmm +// VPMOVSXBD xmm ymm +// VPMOVSXBD m64 ymm +// Construct and append a VPMOVSXBD instruction to the active function. +func (c *Context) VPMOVSXBD(mx, xy operand.Op) { + if inst, err := x86.VPMOVSXBD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBD xmm xmm +// VPMOVSXBD m32 xmm +// VPMOVSXBD xmm ymm +// VPMOVSXBD m64 ymm +// Construct and append a VPMOVSXBD instruction to the active function. +// Operates on the global context. +func VPMOVSXBD(mx, xy operand.Op) { ctx.VPMOVSXBD(mx, xy) } + +// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBQ xmm xmm +// VPMOVSXBQ m16 xmm +// VPMOVSXBQ xmm ymm +// VPMOVSXBQ m32 ymm +// Construct and append a VPMOVSXBQ instruction to the active function. 
+func (c *Context) VPMOVSXBQ(mx, xy operand.Op) { + if inst, err := x86.VPMOVSXBQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBQ xmm xmm +// VPMOVSXBQ m16 xmm +// VPMOVSXBQ xmm ymm +// VPMOVSXBQ m32 ymm +// Construct and append a VPMOVSXBQ instruction to the active function. +// Operates on the global context. +func VPMOVSXBQ(mx, xy operand.Op) { ctx.VPMOVSXBQ(mx, xy) } + +// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBW xmm xmm +// VPMOVSXBW m64 xmm +// VPMOVSXBW xmm ymm +// VPMOVSXBW m128 ymm +// Construct and append a VPMOVSXBW instruction to the active function. +func (c *Context) VPMOVSXBW(mx, xy operand.Op) { + if inst, err := x86.VPMOVSXBW(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBW xmm xmm +// VPMOVSXBW m64 xmm +// VPMOVSXBW xmm ymm +// VPMOVSXBW m128 ymm +// Construct and append a VPMOVSXBW instruction to the active function. +// Operates on the global context. +func VPMOVSXBW(mx, xy operand.Op) { ctx.VPMOVSXBW(mx, xy) } + +// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXDQ xmm xmm +// VPMOVSXDQ m64 xmm +// VPMOVSXDQ xmm ymm +// VPMOVSXDQ m128 ymm +// Construct and append a VPMOVSXDQ instruction to the active function. +func (c *Context) VPMOVSXDQ(mx, xy operand.Op) { + if inst, err := x86.VPMOVSXDQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXDQ xmm xmm +// VPMOVSXDQ m64 xmm +// VPMOVSXDQ xmm ymm +// VPMOVSXDQ m128 ymm +// Construct and append a VPMOVSXDQ instruction to the active function. +// Operates on the global context. +func VPMOVSXDQ(mx, xy operand.Op) { ctx.VPMOVSXDQ(mx, xy) } + +// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXWD xmm xmm +// VPMOVSXWD m64 xmm +// VPMOVSXWD xmm ymm +// VPMOVSXWD m128 ymm +// Construct and append a VPMOVSXWD instruction to the active function. +func (c *Context) VPMOVSXWD(mx, xy operand.Op) { + if inst, err := x86.VPMOVSXWD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXWD xmm xmm +// VPMOVSXWD m64 xmm +// VPMOVSXWD xmm ymm +// VPMOVSXWD m128 ymm +// Construct and append a VPMOVSXWD instruction to the active function. +// Operates on the global context. +func VPMOVSXWD(mx, xy operand.Op) { ctx.VPMOVSXWD(mx, xy) } + +// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXWQ xmm xmm +// VPMOVSXWQ m32 xmm +// VPMOVSXWQ xmm ymm +// VPMOVSXWQ m64 ymm +// Construct and append a VPMOVSXWQ instruction to the active function. +func (c *Context) VPMOVSXWQ(mx, xy operand.Op) { + if inst, err := x86.VPMOVSXWQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension. 
+// +// Forms: +// +// VPMOVSXWQ xmm xmm +// VPMOVSXWQ m32 xmm +// VPMOVSXWQ xmm ymm +// VPMOVSXWQ m64 ymm +// Construct and append a VPMOVSXWQ instruction to the active function. +// Operates on the global context. +func VPMOVSXWQ(mx, xy operand.Op) { ctx.VPMOVSXWQ(mx, xy) } + +// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBD xmm xmm +// VPMOVZXBD m32 xmm +// VPMOVZXBD xmm ymm +// VPMOVZXBD m64 ymm +// Construct and append a VPMOVZXBD instruction to the active function. +func (c *Context) VPMOVZXBD(mx, xy operand.Op) { + if inst, err := x86.VPMOVZXBD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBD xmm xmm +// VPMOVZXBD m32 xmm +// VPMOVZXBD xmm ymm +// VPMOVZXBD m64 ymm +// Construct and append a VPMOVZXBD instruction to the active function. +// Operates on the global context. +func VPMOVZXBD(mx, xy operand.Op) { ctx.VPMOVZXBD(mx, xy) } + +// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBQ xmm xmm +// VPMOVZXBQ m16 xmm +// VPMOVZXBQ xmm ymm +// VPMOVZXBQ m32 ymm +// Construct and append a VPMOVZXBQ instruction to the active function. +func (c *Context) VPMOVZXBQ(mx, xy operand.Op) { + if inst, err := x86.VPMOVZXBQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBQ xmm xmm +// VPMOVZXBQ m16 xmm +// VPMOVZXBQ xmm ymm +// VPMOVZXBQ m32 ymm +// Construct and append a VPMOVZXBQ instruction to the active function. +// Operates on the global context. +func VPMOVZXBQ(mx, xy operand.Op) { ctx.VPMOVZXBQ(mx, xy) } + +// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBW xmm xmm +// VPMOVZXBW m64 xmm +// VPMOVZXBW xmm ymm +// VPMOVZXBW m128 ymm +// Construct and append a VPMOVZXBW instruction to the active function. +func (c *Context) VPMOVZXBW(mx, xy operand.Op) { + if inst, err := x86.VPMOVZXBW(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBW xmm xmm +// VPMOVZXBW m64 xmm +// VPMOVZXBW xmm ymm +// VPMOVZXBW m128 ymm +// Construct and append a VPMOVZXBW instruction to the active function. +// Operates on the global context. +func VPMOVZXBW(mx, xy operand.Op) { ctx.VPMOVZXBW(mx, xy) } + +// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXDQ xmm xmm +// VPMOVZXDQ m64 xmm +// VPMOVZXDQ xmm ymm +// VPMOVZXDQ m128 ymm +// Construct and append a VPMOVZXDQ instruction to the active function. +func (c *Context) VPMOVZXDQ(mx, xy operand.Op) { + if inst, err := x86.VPMOVZXDQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXDQ xmm xmm +// VPMOVZXDQ m64 xmm +// VPMOVZXDQ xmm ymm +// VPMOVZXDQ m128 ymm +// Construct and append a VPMOVZXDQ instruction to the active function. +// Operates on the global context. +func VPMOVZXDQ(mx, xy operand.Op) { ctx.VPMOVZXDQ(mx, xy) } + +// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension. 
+// +// Forms: +// +// VPMOVZXWD xmm xmm +// VPMOVZXWD m64 xmm +// VPMOVZXWD xmm ymm +// VPMOVZXWD m128 ymm +// Construct and append a VPMOVZXWD instruction to the active function. +func (c *Context) VPMOVZXWD(mx, xy operand.Op) { + if inst, err := x86.VPMOVZXWD(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXWD xmm xmm +// VPMOVZXWD m64 xmm +// VPMOVZXWD xmm ymm +// VPMOVZXWD m128 ymm +// Construct and append a VPMOVZXWD instruction to the active function. +// Operates on the global context. +func VPMOVZXWD(mx, xy operand.Op) { ctx.VPMOVZXWD(mx, xy) } + +// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXWQ xmm xmm +// VPMOVZXWQ m32 xmm +// VPMOVZXWQ xmm ymm +// VPMOVZXWQ m64 ymm +// Construct and append a VPMOVZXWQ instruction to the active function. +func (c *Context) VPMOVZXWQ(mx, xy operand.Op) { + if inst, err := x86.VPMOVZXWQ(mx, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXWQ xmm xmm +// VPMOVZXWQ m32 xmm +// VPMOVZXWQ xmm ymm +// VPMOVZXWQ m64 ymm +// Construct and append a VPMOVZXWQ instruction to the active function. +// Operates on the global context. +func VPMOVZXWQ(mx, xy operand.Op) { ctx.VPMOVZXWQ(mx, xy) } + +// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result. +// +// Forms: +// +// VPMULDQ xmm xmm xmm +// VPMULDQ m128 xmm xmm +// VPMULDQ ymm ymm ymm +// VPMULDQ m256 ymm ymm +// Construct and append a VPMULDQ instruction to the active function. +func (c *Context) VPMULDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result. +// +// Forms: +// +// VPMULDQ xmm xmm xmm +// VPMULDQ m128 xmm xmm +// VPMULDQ ymm ymm ymm +// VPMULDQ m256 ymm ymm +// Construct and append a VPMULDQ instruction to the active function. +// Operates on the global context. +func VPMULDQ(mxy, xy, xy1 operand.Op) { ctx.VPMULDQ(mxy, xy, xy1) } + +// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale. +// +// Forms: +// +// VPMULHRSW xmm xmm xmm +// VPMULHRSW m128 xmm xmm +// VPMULHRSW ymm ymm ymm +// VPMULHRSW m256 ymm ymm +// Construct and append a VPMULHRSW instruction to the active function. +func (c *Context) VPMULHRSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULHRSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale. +// +// Forms: +// +// VPMULHRSW xmm xmm xmm +// VPMULHRSW m128 xmm xmm +// VPMULHRSW ymm ymm ymm +// VPMULHRSW m256 ymm ymm +// Construct and append a VPMULHRSW instruction to the active function. +// Operates on the global context. +func VPMULHRSW(mxy, xy, xy1 operand.Op) { ctx.VPMULHRSW(mxy, xy, xy1) } + +// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result. +// +// Forms: +// +// VPMULHUW xmm xmm xmm +// VPMULHUW m128 xmm xmm +// VPMULHUW ymm ymm ymm +// VPMULHUW m256 ymm ymm +// Construct and append a VPMULHUW instruction to the active function. 
+func (c *Context) VPMULHUW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULHUW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result. +// +// Forms: +// +// VPMULHUW xmm xmm xmm +// VPMULHUW m128 xmm xmm +// VPMULHUW ymm ymm ymm +// VPMULHUW m256 ymm ymm +// Construct and append a VPMULHUW instruction to the active function. +// Operates on the global context. +func VPMULHUW(mxy, xy, xy1 operand.Op) { ctx.VPMULHUW(mxy, xy, xy1) } + +// VPMULHW: Multiply Packed Signed Word Integers and Store High Result. +// +// Forms: +// +// VPMULHW xmm xmm xmm +// VPMULHW m128 xmm xmm +// VPMULHW ymm ymm ymm +// VPMULHW m256 ymm ymm +// Construct and append a VPMULHW instruction to the active function. +func (c *Context) VPMULHW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULHW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULHW: Multiply Packed Signed Word Integers and Store High Result. +// +// Forms: +// +// VPMULHW xmm xmm xmm +// VPMULHW m128 xmm xmm +// VPMULHW ymm ymm ymm +// VPMULHW m256 ymm ymm +// Construct and append a VPMULHW instruction to the active function. +// Operates on the global context. +func VPMULHW(mxy, xy, xy1 operand.Op) { ctx.VPMULHW(mxy, xy, xy1) } + +// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result. +// +// Forms: +// +// VPMULLD xmm xmm xmm +// VPMULLD m128 xmm xmm +// VPMULLD ymm ymm ymm +// VPMULLD m256 ymm ymm +// Construct and append a VPMULLD instruction to the active function. +func (c *Context) VPMULLD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULLD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result. +// +// Forms: +// +// VPMULLD xmm xmm xmm +// VPMULLD m128 xmm xmm +// VPMULLD ymm ymm ymm +// VPMULLD m256 ymm ymm +// Construct and append a VPMULLD instruction to the active function. +// Operates on the global context. +func VPMULLD(mxy, xy, xy1 operand.Op) { ctx.VPMULLD(mxy, xy, xy1) } + +// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result. +// +// Forms: +// +// VPMULLW xmm xmm xmm +// VPMULLW m128 xmm xmm +// VPMULLW ymm ymm ymm +// VPMULLW m256 ymm ymm +// Construct and append a VPMULLW instruction to the active function. +func (c *Context) VPMULLW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULLW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result. +// +// Forms: +// +// VPMULLW xmm xmm xmm +// VPMULLW m128 xmm xmm +// VPMULLW ymm ymm ymm +// VPMULLW m256 ymm ymm +// Construct and append a VPMULLW instruction to the active function. +// Operates on the global context. +func VPMULLW(mxy, xy, xy1 operand.Op) { ctx.VPMULLW(mxy, xy, xy1) } + +// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMULUDQ xmm xmm xmm +// VPMULUDQ m128 xmm xmm +// VPMULUDQ ymm ymm ymm +// VPMULUDQ m256 ymm ymm +// Construct and append a VPMULUDQ instruction to the active function. +func (c *Context) VPMULUDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPMULUDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers. 
+// +// Forms: +// +// VPMULUDQ xmm xmm xmm +// VPMULUDQ m128 xmm xmm +// VPMULUDQ ymm ymm ymm +// VPMULUDQ m256 ymm ymm +// Construct and append a VPMULUDQ instruction to the active function. +// Operates on the global context. +func VPMULUDQ(mxy, xy, xy1 operand.Op) { ctx.VPMULUDQ(mxy, xy, xy1) } + +// VPOR: Packed Bitwise Logical OR. +// +// Forms: +// +// VPOR xmm xmm xmm +// VPOR m128 xmm xmm +// VPOR ymm ymm ymm +// VPOR m256 ymm ymm +// Construct and append a VPOR instruction to the active function. +func (c *Context) VPOR(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPOR(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPOR: Packed Bitwise Logical OR. +// +// Forms: +// +// VPOR xmm xmm xmm +// VPOR m128 xmm xmm +// VPOR ymm ymm ymm +// VPOR m256 ymm ymm +// Construct and append a VPOR instruction to the active function. +// Operates on the global context. +func VPOR(mxy, xy, xy1 operand.Op) { ctx.VPOR(mxy, xy, xy1) } + +// VPSADBW: Compute Sum of Absolute Differences. +// +// Forms: +// +// VPSADBW xmm xmm xmm +// VPSADBW m128 xmm xmm +// VPSADBW ymm ymm ymm +// VPSADBW m256 ymm ymm +// Construct and append a VPSADBW instruction to the active function. +func (c *Context) VPSADBW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSADBW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSADBW: Compute Sum of Absolute Differences. +// +// Forms: +// +// VPSADBW xmm xmm xmm +// VPSADBW m128 xmm xmm +// VPSADBW ymm ymm ymm +// VPSADBW m256 ymm ymm +// Construct and append a VPSADBW instruction to the active function. +// Operates on the global context. +func VPSADBW(mxy, xy, xy1 operand.Op) { ctx.VPSADBW(mxy, xy, xy1) } + +// VPSHUFB: Packed Shuffle Bytes. +// +// Forms: +// +// VPSHUFB xmm xmm xmm +// VPSHUFB m128 xmm xmm +// VPSHUFB ymm ymm ymm +// VPSHUFB m256 ymm ymm +// Construct and append a VPSHUFB instruction to the active function. +func (c *Context) VPSHUFB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSHUFB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSHUFB: Packed Shuffle Bytes. +// +// Forms: +// +// VPSHUFB xmm xmm xmm +// VPSHUFB m128 xmm xmm +// VPSHUFB ymm ymm ymm +// VPSHUFB m256 ymm ymm +// Construct and append a VPSHUFB instruction to the active function. +// Operates on the global context. +func VPSHUFB(mxy, xy, xy1 operand.Op) { ctx.VPSHUFB(mxy, xy, xy1) } + +// VPSHUFD: Shuffle Packed Doublewords. +// +// Forms: +// +// VPSHUFD imm8 xmm xmm +// VPSHUFD imm8 m128 xmm +// VPSHUFD imm8 ymm ymm +// VPSHUFD imm8 m256 ymm +// Construct and append a VPSHUFD instruction to the active function. +func (c *Context) VPSHUFD(i, mxy, xy operand.Op) { + if inst, err := x86.VPSHUFD(i, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSHUFD: Shuffle Packed Doublewords. +// +// Forms: +// +// VPSHUFD imm8 xmm xmm +// VPSHUFD imm8 m128 xmm +// VPSHUFD imm8 ymm ymm +// VPSHUFD imm8 m256 ymm +// Construct and append a VPSHUFD instruction to the active function. +// Operates on the global context. +func VPSHUFD(i, mxy, xy operand.Op) { ctx.VPSHUFD(i, mxy, xy) } + +// VPSHUFHW: Shuffle Packed High Words. +// +// Forms: +// +// VPSHUFHW imm8 xmm xmm +// VPSHUFHW imm8 m128 xmm +// VPSHUFHW imm8 ymm ymm +// VPSHUFHW imm8 m256 ymm +// Construct and append a VPSHUFHW instruction to the active function. 
+func (c *Context) VPSHUFHW(i, mxy, xy operand.Op) { + if inst, err := x86.VPSHUFHW(i, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSHUFHW: Shuffle Packed High Words. +// +// Forms: +// +// VPSHUFHW imm8 xmm xmm +// VPSHUFHW imm8 m128 xmm +// VPSHUFHW imm8 ymm ymm +// VPSHUFHW imm8 m256 ymm +// Construct and append a VPSHUFHW instruction to the active function. +// Operates on the global context. +func VPSHUFHW(i, mxy, xy operand.Op) { ctx.VPSHUFHW(i, mxy, xy) } + +// VPSHUFLW: Shuffle Packed Low Words. +// +// Forms: +// +// VPSHUFLW imm8 xmm xmm +// VPSHUFLW imm8 m128 xmm +// VPSHUFLW imm8 ymm ymm +// VPSHUFLW imm8 m256 ymm +// Construct and append a VPSHUFLW instruction to the active function. +func (c *Context) VPSHUFLW(i, mxy, xy operand.Op) { + if inst, err := x86.VPSHUFLW(i, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSHUFLW: Shuffle Packed Low Words. +// +// Forms: +// +// VPSHUFLW imm8 xmm xmm +// VPSHUFLW imm8 m128 xmm +// VPSHUFLW imm8 ymm ymm +// VPSHUFLW imm8 m256 ymm +// Construct and append a VPSHUFLW instruction to the active function. +// Operates on the global context. +func VPSHUFLW(i, mxy, xy operand.Op) { ctx.VPSHUFLW(i, mxy, xy) } + +// VPSIGNB: Packed Sign of Byte Integers. +// +// Forms: +// +// VPSIGNB xmm xmm xmm +// VPSIGNB m128 xmm xmm +// VPSIGNB ymm ymm ymm +// VPSIGNB m256 ymm ymm +// Construct and append a VPSIGNB instruction to the active function. +func (c *Context) VPSIGNB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSIGNB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSIGNB: Packed Sign of Byte Integers. +// +// Forms: +// +// VPSIGNB xmm xmm xmm +// VPSIGNB m128 xmm xmm +// VPSIGNB ymm ymm ymm +// VPSIGNB m256 ymm ymm +// Construct and append a VPSIGNB instruction to the active function. +// Operates on the global context. +func VPSIGNB(mxy, xy, xy1 operand.Op) { ctx.VPSIGNB(mxy, xy, xy1) } + +// VPSIGND: Packed Sign of Doubleword Integers. +// +// Forms: +// +// VPSIGND xmm xmm xmm +// VPSIGND m128 xmm xmm +// VPSIGND ymm ymm ymm +// VPSIGND m256 ymm ymm +// Construct and append a VPSIGND instruction to the active function. +func (c *Context) VPSIGND(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSIGND(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSIGND: Packed Sign of Doubleword Integers. +// +// Forms: +// +// VPSIGND xmm xmm xmm +// VPSIGND m128 xmm xmm +// VPSIGND ymm ymm ymm +// VPSIGND m256 ymm ymm +// Construct and append a VPSIGND instruction to the active function. +// Operates on the global context. +func VPSIGND(mxy, xy, xy1 operand.Op) { ctx.VPSIGND(mxy, xy, xy1) } + +// VPSIGNW: Packed Sign of Word Integers. +// +// Forms: +// +// VPSIGNW xmm xmm xmm +// VPSIGNW m128 xmm xmm +// VPSIGNW ymm ymm ymm +// VPSIGNW m256 ymm ymm +// Construct and append a VPSIGNW instruction to the active function. +func (c *Context) VPSIGNW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSIGNW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSIGNW: Packed Sign of Word Integers. +// +// Forms: +// +// VPSIGNW xmm xmm xmm +// VPSIGNW m128 xmm xmm +// VPSIGNW ymm ymm ymm +// VPSIGNW m256 ymm ymm +// Construct and append a VPSIGNW instruction to the active function. +// Operates on the global context. +func VPSIGNW(mxy, xy, xy1 operand.Op) { ctx.VPSIGNW(mxy, xy, xy1) } + +// VPSLLD: Shift Packed Doubleword Data Left Logical. 
+// +// Forms: +// +// VPSLLD imm8 xmm xmm +// VPSLLD xmm xmm xmm +// VPSLLD m128 xmm xmm +// VPSLLD imm8 ymm ymm +// VPSLLD xmm ymm ymm +// VPSLLD m128 ymm ymm +// Construct and append a VPSLLD instruction to the active function. +func (c *Context) VPSLLD(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSLLD(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSLLD: Shift Packed Doubleword Data Left Logical. +// +// Forms: +// +// VPSLLD imm8 xmm xmm +// VPSLLD xmm xmm xmm +// VPSLLD m128 xmm xmm +// VPSLLD imm8 ymm ymm +// VPSLLD xmm ymm ymm +// VPSLLD m128 ymm ymm +// Construct and append a VPSLLD instruction to the active function. +// Operates on the global context. +func VPSLLD(imx, xy, xy1 operand.Op) { ctx.VPSLLD(imx, xy, xy1) } + +// VPSLLDQ: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// VPSLLDQ imm8 xmm xmm +// VPSLLDQ imm8 ymm ymm +// Construct and append a VPSLLDQ instruction to the active function. +func (c *Context) VPSLLDQ(i, xy, xy1 operand.Op) { + if inst, err := x86.VPSLLDQ(i, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSLLDQ: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// VPSLLDQ imm8 xmm xmm +// VPSLLDQ imm8 ymm ymm +// Construct and append a VPSLLDQ instruction to the active function. +// Operates on the global context. +func VPSLLDQ(i, xy, xy1 operand.Op) { ctx.VPSLLDQ(i, xy, xy1) } + +// VPSLLQ: Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// VPSLLQ imm8 xmm xmm +// VPSLLQ xmm xmm xmm +// VPSLLQ m128 xmm xmm +// VPSLLQ imm8 ymm ymm +// VPSLLQ xmm ymm ymm +// VPSLLQ m128 ymm ymm +// Construct and append a VPSLLQ instruction to the active function. +func (c *Context) VPSLLQ(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSLLQ(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSLLQ: Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// VPSLLQ imm8 xmm xmm +// VPSLLQ xmm xmm xmm +// VPSLLQ m128 xmm xmm +// VPSLLQ imm8 ymm ymm +// VPSLLQ xmm ymm ymm +// VPSLLQ m128 ymm ymm +// Construct and append a VPSLLQ instruction to the active function. +// Operates on the global context. +func VPSLLQ(imx, xy, xy1 operand.Op) { ctx.VPSLLQ(imx, xy, xy1) } + +// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical. +// +// Forms: +// +// VPSLLVD xmm xmm xmm +// VPSLLVD m128 xmm xmm +// VPSLLVD ymm ymm ymm +// VPSLLVD m256 ymm ymm +// Construct and append a VPSLLVD instruction to the active function. +func (c *Context) VPSLLVD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSLLVD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical. +// +// Forms: +// +// VPSLLVD xmm xmm xmm +// VPSLLVD m128 xmm xmm +// VPSLLVD ymm ymm ymm +// VPSLLVD m256 ymm ymm +// Construct and append a VPSLLVD instruction to the active function. +// Operates on the global context. +func VPSLLVD(mxy, xy, xy1 operand.Op) { ctx.VPSLLVD(mxy, xy, xy1) } + +// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// VPSLLVQ xmm xmm xmm +// VPSLLVQ m128 xmm xmm +// VPSLLVQ ymm ymm ymm +// VPSLLVQ m256 ymm ymm +// Construct and append a VPSLLVQ instruction to the active function. 
+func (c *Context) VPSLLVQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSLLVQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// VPSLLVQ xmm xmm xmm +// VPSLLVQ m128 xmm xmm +// VPSLLVQ ymm ymm ymm +// VPSLLVQ m256 ymm ymm +// Construct and append a VPSLLVQ instruction to the active function. +// Operates on the global context. +func VPSLLVQ(mxy, xy, xy1 operand.Op) { ctx.VPSLLVQ(mxy, xy, xy1) } + +// VPSLLW: Shift Packed Word Data Left Logical. +// +// Forms: +// +// VPSLLW imm8 xmm xmm +// VPSLLW xmm xmm xmm +// VPSLLW m128 xmm xmm +// VPSLLW imm8 ymm ymm +// VPSLLW xmm ymm ymm +// VPSLLW m128 ymm ymm +// Construct and append a VPSLLW instruction to the active function. +func (c *Context) VPSLLW(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSLLW(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSLLW: Shift Packed Word Data Left Logical. +// +// Forms: +// +// VPSLLW imm8 xmm xmm +// VPSLLW xmm xmm xmm +// VPSLLW m128 xmm xmm +// VPSLLW imm8 ymm ymm +// VPSLLW xmm ymm ymm +// VPSLLW m128 ymm ymm +// Construct and append a VPSLLW instruction to the active function. +// Operates on the global context. +func VPSLLW(imx, xy, xy1 operand.Op) { ctx.VPSLLW(imx, xy, xy1) } + +// VPSRAD: Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// VPSRAD imm8 xmm xmm +// VPSRAD xmm xmm xmm +// VPSRAD m128 xmm xmm +// VPSRAD imm8 ymm ymm +// VPSRAD xmm ymm ymm +// VPSRAD m128 ymm ymm +// Construct and append a VPSRAD instruction to the active function. +func (c *Context) VPSRAD(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSRAD(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRAD: Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// VPSRAD imm8 xmm xmm +// VPSRAD xmm xmm xmm +// VPSRAD m128 xmm xmm +// VPSRAD imm8 ymm ymm +// VPSRAD xmm ymm ymm +// VPSRAD m128 ymm ymm +// Construct and append a VPSRAD instruction to the active function. +// Operates on the global context. +func VPSRAD(imx, xy, xy1 operand.Op) { ctx.VPSRAD(imx, xy, xy1) } + +// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// VPSRAVD xmm xmm xmm +// VPSRAVD m128 xmm xmm +// VPSRAVD ymm ymm ymm +// VPSRAVD m256 ymm ymm +// Construct and append a VPSRAVD instruction to the active function. +func (c *Context) VPSRAVD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSRAVD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// VPSRAVD xmm xmm xmm +// VPSRAVD m128 xmm xmm +// VPSRAVD ymm ymm ymm +// VPSRAVD m256 ymm ymm +// Construct and append a VPSRAVD instruction to the active function. +// Operates on the global context. +func VPSRAVD(mxy, xy, xy1 operand.Op) { ctx.VPSRAVD(mxy, xy, xy1) } + +// VPSRAW: Shift Packed Word Data Right Arithmetic. +// +// Forms: +// +// VPSRAW imm8 xmm xmm +// VPSRAW xmm xmm xmm +// VPSRAW m128 xmm xmm +// VPSRAW imm8 ymm ymm +// VPSRAW xmm ymm ymm +// VPSRAW m128 ymm ymm +// Construct and append a VPSRAW instruction to the active function. +func (c *Context) VPSRAW(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSRAW(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRAW: Shift Packed Word Data Right Arithmetic. 
+// +// Forms: +// +// VPSRAW imm8 xmm xmm +// VPSRAW xmm xmm xmm +// VPSRAW m128 xmm xmm +// VPSRAW imm8 ymm ymm +// VPSRAW xmm ymm ymm +// VPSRAW m128 ymm ymm +// Construct and append a VPSRAW instruction to the active function. +// Operates on the global context. +func VPSRAW(imx, xy, xy1 operand.Op) { ctx.VPSRAW(imx, xy, xy1) } + +// VPSRLD: Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// VPSRLD imm8 xmm xmm +// VPSRLD xmm xmm xmm +// VPSRLD m128 xmm xmm +// VPSRLD imm8 ymm ymm +// VPSRLD xmm ymm ymm +// VPSRLD m128 ymm ymm +// Construct and append a VPSRLD instruction to the active function. +func (c *Context) VPSRLD(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSRLD(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRLD: Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// VPSRLD imm8 xmm xmm +// VPSRLD xmm xmm xmm +// VPSRLD m128 xmm xmm +// VPSRLD imm8 ymm ymm +// VPSRLD xmm ymm ymm +// VPSRLD m128 ymm ymm +// Construct and append a VPSRLD instruction to the active function. +// Operates on the global context. +func VPSRLD(imx, xy, xy1 operand.Op) { ctx.VPSRLD(imx, xy, xy1) } + +// VPSRLDQ: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// VPSRLDQ imm8 xmm xmm +// VPSRLDQ imm8 ymm ymm +// Construct and append a VPSRLDQ instruction to the active function. +func (c *Context) VPSRLDQ(i, xy, xy1 operand.Op) { + if inst, err := x86.VPSRLDQ(i, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRLDQ: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// VPSRLDQ imm8 xmm xmm +// VPSRLDQ imm8 ymm ymm +// Construct and append a VPSRLDQ instruction to the active function. +// Operates on the global context. +func VPSRLDQ(i, xy, xy1 operand.Op) { ctx.VPSRLDQ(i, xy, xy1) } + +// VPSRLQ: Shift Packed Quadword Data Right Logical. +// +// Forms: +// +// VPSRLQ imm8 xmm xmm +// VPSRLQ xmm xmm xmm +// VPSRLQ m128 xmm xmm +// VPSRLQ imm8 ymm ymm +// VPSRLQ xmm ymm ymm +// VPSRLQ m128 ymm ymm +// Construct and append a VPSRLQ instruction to the active function. +func (c *Context) VPSRLQ(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSRLQ(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRLQ: Shift Packed Quadword Data Right Logical. +// +// Forms: +// +// VPSRLQ imm8 xmm xmm +// VPSRLQ xmm xmm xmm +// VPSRLQ m128 xmm xmm +// VPSRLQ imm8 ymm ymm +// VPSRLQ xmm ymm ymm +// VPSRLQ m128 ymm ymm +// Construct and append a VPSRLQ instruction to the active function. +// Operates on the global context. +func VPSRLQ(imx, xy, xy1 operand.Op) { ctx.VPSRLQ(imx, xy, xy1) } + +// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// VPSRLVD xmm xmm xmm +// VPSRLVD m128 xmm xmm +// VPSRLVD ymm ymm ymm +// VPSRLVD m256 ymm ymm +// Construct and append a VPSRLVD instruction to the active function. +func (c *Context) VPSRLVD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSRLVD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// VPSRLVD xmm xmm xmm +// VPSRLVD m128 xmm xmm +// VPSRLVD ymm ymm ymm +// VPSRLVD m256 ymm ymm +// Construct and append a VPSRLVD instruction to the active function. +// Operates on the global context. +func VPSRLVD(mxy, xy, xy1 operand.Op) { ctx.VPSRLVD(mxy, xy, xy1) } + +// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical. 
+// +// Forms: +// +// VPSRLVQ xmm xmm xmm +// VPSRLVQ m128 xmm xmm +// VPSRLVQ ymm ymm ymm +// VPSRLVQ m256 ymm ymm +// Construct and append a VPSRLVQ instruction to the active function. +func (c *Context) VPSRLVQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSRLVQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical. +// +// Forms: +// +// VPSRLVQ xmm xmm xmm +// VPSRLVQ m128 xmm xmm +// VPSRLVQ ymm ymm ymm +// VPSRLVQ m256 ymm ymm +// Construct and append a VPSRLVQ instruction to the active function. +// Operates on the global context. +func VPSRLVQ(mxy, xy, xy1 operand.Op) { ctx.VPSRLVQ(mxy, xy, xy1) } + +// VPSRLW: Shift Packed Word Data Right Logical. +// +// Forms: +// +// VPSRLW imm8 xmm xmm +// VPSRLW xmm xmm xmm +// VPSRLW m128 xmm xmm +// VPSRLW imm8 ymm ymm +// VPSRLW xmm ymm ymm +// VPSRLW m128 ymm ymm +// Construct and append a VPSRLW instruction to the active function. +func (c *Context) VPSRLW(imx, xy, xy1 operand.Op) { + if inst, err := x86.VPSRLW(imx, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSRLW: Shift Packed Word Data Right Logical. +// +// Forms: +// +// VPSRLW imm8 xmm xmm +// VPSRLW xmm xmm xmm +// VPSRLW m128 xmm xmm +// VPSRLW imm8 ymm ymm +// VPSRLW xmm ymm ymm +// VPSRLW m128 ymm ymm +// Construct and append a VPSRLW instruction to the active function. +// Operates on the global context. +func VPSRLW(imx, xy, xy1 operand.Op) { ctx.VPSRLW(imx, xy, xy1) } + +// VPSUBB: Subtract Packed Byte Integers. +// +// Forms: +// +// VPSUBB xmm xmm xmm +// VPSUBB m128 xmm xmm +// VPSUBB ymm ymm ymm +// VPSUBB m256 ymm ymm +// Construct and append a VPSUBB instruction to the active function. +func (c *Context) VPSUBB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBB: Subtract Packed Byte Integers. +// +// Forms: +// +// VPSUBB xmm xmm xmm +// VPSUBB m128 xmm xmm +// VPSUBB ymm ymm ymm +// VPSUBB m256 ymm ymm +// Construct and append a VPSUBB instruction to the active function. +// Operates on the global context. +func VPSUBB(mxy, xy, xy1 operand.Op) { ctx.VPSUBB(mxy, xy, xy1) } + +// VPSUBD: Subtract Packed Doubleword Integers. +// +// Forms: +// +// VPSUBD xmm xmm xmm +// VPSUBD m128 xmm xmm +// VPSUBD ymm ymm ymm +// VPSUBD m256 ymm ymm +// Construct and append a VPSUBD instruction to the active function. +func (c *Context) VPSUBD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBD: Subtract Packed Doubleword Integers. +// +// Forms: +// +// VPSUBD xmm xmm xmm +// VPSUBD m128 xmm xmm +// VPSUBD ymm ymm ymm +// VPSUBD m256 ymm ymm +// Construct and append a VPSUBD instruction to the active function. +// Operates on the global context. +func VPSUBD(mxy, xy, xy1 operand.Op) { ctx.VPSUBD(mxy, xy, xy1) } + +// VPSUBQ: Subtract Packed Quadword Integers. +// +// Forms: +// +// VPSUBQ xmm xmm xmm +// VPSUBQ m128 xmm xmm +// VPSUBQ ymm ymm ymm +// VPSUBQ m256 ymm ymm +// Construct and append a VPSUBQ instruction to the active function. +func (c *Context) VPSUBQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBQ: Subtract Packed Quadword Integers. 
+// +// Forms: +// +// VPSUBQ xmm xmm xmm +// VPSUBQ m128 xmm xmm +// VPSUBQ ymm ymm ymm +// VPSUBQ m256 ymm ymm +// Construct and append a VPSUBQ instruction to the active function. +// Operates on the global context. +func VPSUBQ(mxy, xy, xy1 operand.Op) { ctx.VPSUBQ(mxy, xy, xy1) } + +// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// VPSUBSB xmm xmm xmm +// VPSUBSB m128 xmm xmm +// VPSUBSB ymm ymm ymm +// VPSUBSB m256 ymm ymm +// Construct and append a VPSUBSB instruction to the active function. +func (c *Context) VPSUBSB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBSB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// VPSUBSB xmm xmm xmm +// VPSUBSB m128 xmm xmm +// VPSUBSB ymm ymm ymm +// VPSUBSB m256 ymm ymm +// Construct and append a VPSUBSB instruction to the active function. +// Operates on the global context. +func VPSUBSB(mxy, xy, xy1 operand.Op) { ctx.VPSUBSB(mxy, xy, xy1) } + +// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPSUBSW xmm xmm xmm +// VPSUBSW m128 xmm xmm +// VPSUBSW ymm ymm ymm +// VPSUBSW m256 ymm ymm +// Construct and append a VPSUBSW instruction to the active function. +func (c *Context) VPSUBSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPSUBSW xmm xmm xmm +// VPSUBSW m128 xmm xmm +// VPSUBSW ymm ymm ymm +// VPSUBSW m256 ymm ymm +// Construct and append a VPSUBSW instruction to the active function. +// Operates on the global context. +func VPSUBSW(mxy, xy, xy1 operand.Op) { ctx.VPSUBSW(mxy, xy, xy1) } + +// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// VPSUBUSB xmm xmm xmm +// VPSUBUSB m128 xmm xmm +// VPSUBUSB ymm ymm ymm +// VPSUBUSB m256 ymm ymm +// Construct and append a VPSUBUSB instruction to the active function. +func (c *Context) VPSUBUSB(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBUSB(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// VPSUBUSB xmm xmm xmm +// VPSUBUSB m128 xmm xmm +// VPSUBUSB ymm ymm ymm +// VPSUBUSB m256 ymm ymm +// Construct and append a VPSUBUSB instruction to the active function. +// Operates on the global context. +func VPSUBUSB(mxy, xy, xy1 operand.Op) { ctx.VPSUBUSB(mxy, xy, xy1) } + +// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// VPSUBUSW xmm xmm xmm +// VPSUBUSW m128 xmm xmm +// VPSUBUSW ymm ymm ymm +// VPSUBUSW m256 ymm ymm +// Construct and append a VPSUBUSW instruction to the active function. +func (c *Context) VPSUBUSW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBUSW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// VPSUBUSW xmm xmm xmm +// VPSUBUSW m128 xmm xmm +// VPSUBUSW ymm ymm ymm +// VPSUBUSW m256 ymm ymm +// Construct and append a VPSUBUSW instruction to the active function. +// Operates on the global context. 
+func VPSUBUSW(mxy, xy, xy1 operand.Op) { ctx.VPSUBUSW(mxy, xy, xy1) } + +// VPSUBW: Subtract Packed Word Integers. +// +// Forms: +// +// VPSUBW xmm xmm xmm +// VPSUBW m128 xmm xmm +// VPSUBW ymm ymm ymm +// VPSUBW m256 ymm ymm +// Construct and append a VPSUBW instruction to the active function. +func (c *Context) VPSUBW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPSUBW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPSUBW: Subtract Packed Word Integers. +// +// Forms: +// +// VPSUBW xmm xmm xmm +// VPSUBW m128 xmm xmm +// VPSUBW ymm ymm ymm +// VPSUBW m256 ymm ymm +// Construct and append a VPSUBW instruction to the active function. +// Operates on the global context. +func VPSUBW(mxy, xy, xy1 operand.Op) { ctx.VPSUBW(mxy, xy, xy1) } + +// VPTEST: Packed Logical Compare. +// +// Forms: +// +// VPTEST xmm xmm +// VPTEST m128 xmm +// VPTEST ymm ymm +// VPTEST m256 ymm +// Construct and append a VPTEST instruction to the active function. +func (c *Context) VPTEST(mxy, xy operand.Op) { + if inst, err := x86.VPTEST(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPTEST: Packed Logical Compare. +// +// Forms: +// +// VPTEST xmm xmm +// VPTEST m128 xmm +// VPTEST ymm ymm +// VPTEST m256 ymm +// Construct and append a VPTEST instruction to the active function. +// Operates on the global context. +func VPTEST(mxy, xy operand.Op) { ctx.VPTEST(mxy, xy) } + +// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words. +// +// Forms: +// +// VPUNPCKHBW xmm xmm xmm +// VPUNPCKHBW m128 xmm xmm +// VPUNPCKHBW ymm ymm ymm +// VPUNPCKHBW m256 ymm ymm +// Construct and append a VPUNPCKHBW instruction to the active function. +func (c *Context) VPUNPCKHBW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKHBW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words. +// +// Forms: +// +// VPUNPCKHBW xmm xmm xmm +// VPUNPCKHBW m128 xmm xmm +// VPUNPCKHBW ymm ymm ymm +// VPUNPCKHBW m256 ymm ymm +// Construct and append a VPUNPCKHBW instruction to the active function. +// Operates on the global context. +func VPUNPCKHBW(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHBW(mxy, xy, xy1) } + +// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords. +// +// Forms: +// +// VPUNPCKHDQ xmm xmm xmm +// VPUNPCKHDQ m128 xmm xmm +// VPUNPCKHDQ ymm ymm ymm +// VPUNPCKHDQ m256 ymm ymm +// Construct and append a VPUNPCKHDQ instruction to the active function. +func (c *Context) VPUNPCKHDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKHDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords. +// +// Forms: +// +// VPUNPCKHDQ xmm xmm xmm +// VPUNPCKHDQ m128 xmm xmm +// VPUNPCKHDQ ymm ymm ymm +// VPUNPCKHDQ m256 ymm ymm +// Construct and append a VPUNPCKHDQ instruction to the active function. +// Operates on the global context. +func VPUNPCKHDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHDQ(mxy, xy, xy1) } + +// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords. +// +// Forms: +// +// VPUNPCKHQDQ xmm xmm xmm +// VPUNPCKHQDQ m128 xmm xmm +// VPUNPCKHQDQ ymm ymm ymm +// VPUNPCKHQDQ m256 ymm ymm +// Construct and append a VPUNPCKHQDQ instruction to the active function. 
+func (c *Context) VPUNPCKHQDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKHQDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords. +// +// Forms: +// +// VPUNPCKHQDQ xmm xmm xmm +// VPUNPCKHQDQ m128 xmm xmm +// VPUNPCKHQDQ ymm ymm ymm +// VPUNPCKHQDQ m256 ymm ymm +// Construct and append a VPUNPCKHQDQ instruction to the active function. +// Operates on the global context. +func VPUNPCKHQDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHQDQ(mxy, xy, xy1) } + +// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords. +// +// Forms: +// +// VPUNPCKHWD xmm xmm xmm +// VPUNPCKHWD m128 xmm xmm +// VPUNPCKHWD ymm ymm ymm +// VPUNPCKHWD m256 ymm ymm +// Construct and append a VPUNPCKHWD instruction to the active function. +func (c *Context) VPUNPCKHWD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKHWD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords. +// +// Forms: +// +// VPUNPCKHWD xmm xmm xmm +// VPUNPCKHWD m128 xmm xmm +// VPUNPCKHWD ymm ymm ymm +// VPUNPCKHWD m256 ymm ymm +// Construct and append a VPUNPCKHWD instruction to the active function. +// Operates on the global context. +func VPUNPCKHWD(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHWD(mxy, xy, xy1) } + +// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words. +// +// Forms: +// +// VPUNPCKLBW xmm xmm xmm +// VPUNPCKLBW m128 xmm xmm +// VPUNPCKLBW ymm ymm ymm +// VPUNPCKLBW m256 ymm ymm +// Construct and append a VPUNPCKLBW instruction to the active function. +func (c *Context) VPUNPCKLBW(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKLBW(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words. +// +// Forms: +// +// VPUNPCKLBW xmm xmm xmm +// VPUNPCKLBW m128 xmm xmm +// VPUNPCKLBW ymm ymm ymm +// VPUNPCKLBW m256 ymm ymm +// Construct and append a VPUNPCKLBW instruction to the active function. +// Operates on the global context. +func VPUNPCKLBW(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLBW(mxy, xy, xy1) } + +// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords. +// +// Forms: +// +// VPUNPCKLDQ xmm xmm xmm +// VPUNPCKLDQ m128 xmm xmm +// VPUNPCKLDQ ymm ymm ymm +// VPUNPCKLDQ m256 ymm ymm +// Construct and append a VPUNPCKLDQ instruction to the active function. +func (c *Context) VPUNPCKLDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKLDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords. +// +// Forms: +// +// VPUNPCKLDQ xmm xmm xmm +// VPUNPCKLDQ m128 xmm xmm +// VPUNPCKLDQ ymm ymm ymm +// VPUNPCKLDQ m256 ymm ymm +// Construct and append a VPUNPCKLDQ instruction to the active function. +// Operates on the global context. +func VPUNPCKLDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLDQ(mxy, xy, xy1) } + +// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords. +// +// Forms: +// +// VPUNPCKLQDQ xmm xmm xmm +// VPUNPCKLQDQ m128 xmm xmm +// VPUNPCKLQDQ ymm ymm ymm +// VPUNPCKLQDQ m256 ymm ymm +// Construct and append a VPUNPCKLQDQ instruction to the active function. 
+func (c *Context) VPUNPCKLQDQ(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKLQDQ(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords. +// +// Forms: +// +// VPUNPCKLQDQ xmm xmm xmm +// VPUNPCKLQDQ m128 xmm xmm +// VPUNPCKLQDQ ymm ymm ymm +// VPUNPCKLQDQ m256 ymm ymm +// Construct and append a VPUNPCKLQDQ instruction to the active function. +// Operates on the global context. +func VPUNPCKLQDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLQDQ(mxy, xy, xy1) } + +// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords. +// +// Forms: +// +// VPUNPCKLWD xmm xmm xmm +// VPUNPCKLWD m128 xmm xmm +// VPUNPCKLWD ymm ymm ymm +// VPUNPCKLWD m256 ymm ymm +// Construct and append a VPUNPCKLWD instruction to the active function. +func (c *Context) VPUNPCKLWD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPUNPCKLWD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords. +// +// Forms: +// +// VPUNPCKLWD xmm xmm xmm +// VPUNPCKLWD m128 xmm xmm +// VPUNPCKLWD ymm ymm ymm +// VPUNPCKLWD m256 ymm ymm +// Construct and append a VPUNPCKLWD instruction to the active function. +// Operates on the global context. +func VPUNPCKLWD(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLWD(mxy, xy, xy1) } + +// VPXOR: Packed Bitwise Logical Exclusive OR. +// +// Forms: +// +// VPXOR xmm xmm xmm +// VPXOR m128 xmm xmm +// VPXOR ymm ymm ymm +// VPXOR m256 ymm ymm +// Construct and append a VPXOR instruction to the active function. +func (c *Context) VPXOR(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VPXOR(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VPXOR: Packed Bitwise Logical Exclusive OR. +// +// Forms: +// +// VPXOR xmm xmm xmm +// VPXOR m128 xmm xmm +// VPXOR ymm ymm ymm +// VPXOR m256 ymm ymm +// Construct and append a VPXOR instruction to the active function. +// Operates on the global context. +func VPXOR(mxy, xy, xy1 operand.Op) { ctx.VPXOR(mxy, xy, xy1) } + +// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VRCPPS xmm xmm +// VRCPPS m128 xmm +// VRCPPS ymm ymm +// VRCPPS m256 ymm +// Construct and append a VRCPPS instruction to the active function. +func (c *Context) VRCPPS(mxy, xy operand.Op) { + if inst, err := x86.VRCPPS(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VRCPPS xmm xmm +// VRCPPS m128 xmm +// VRCPPS ymm ymm +// VRCPPS m256 ymm +// Construct and append a VRCPPS instruction to the active function. +// Operates on the global context. +func VRCPPS(mxy, xy operand.Op) { ctx.VRCPPS(mxy, xy) } + +// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VRCPSS xmm xmm xmm +// VRCPSS m32 xmm xmm +// Construct and append a VRCPSS instruction to the active function. +func (c *Context) VRCPSS(mx, x, x1 operand.Op) { + if inst, err := x86.VRCPSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VRCPSS xmm xmm xmm +// VRCPSS m32 xmm xmm +// Construct and append a VRCPSS instruction to the active function. 
+// Operates on the global context. +func VRCPSS(mx, x, x1 operand.Op) { ctx.VRCPSS(mx, x, x1) } + +// VROUNDPD: Round Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VROUNDPD imm8 xmm xmm +// VROUNDPD imm8 m128 xmm +// VROUNDPD imm8 ymm ymm +// VROUNDPD imm8 m256 ymm +// Construct and append a VROUNDPD instruction to the active function. +func (c *Context) VROUNDPD(i, mxy, xy operand.Op) { + if inst, err := x86.VROUNDPD(i, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VROUNDPD: Round Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VROUNDPD imm8 xmm xmm +// VROUNDPD imm8 m128 xmm +// VROUNDPD imm8 ymm ymm +// VROUNDPD imm8 m256 ymm +// Construct and append a VROUNDPD instruction to the active function. +// Operates on the global context. +func VROUNDPD(i, mxy, xy operand.Op) { ctx.VROUNDPD(i, mxy, xy) } + +// VROUNDPS: Round Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VROUNDPS imm8 xmm xmm +// VROUNDPS imm8 m128 xmm +// VROUNDPS imm8 ymm ymm +// VROUNDPS imm8 m256 ymm +// Construct and append a VROUNDPS instruction to the active function. +func (c *Context) VROUNDPS(i, mxy, xy operand.Op) { + if inst, err := x86.VROUNDPS(i, mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VROUNDPS: Round Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VROUNDPS imm8 xmm xmm +// VROUNDPS imm8 m128 xmm +// VROUNDPS imm8 ymm ymm +// VROUNDPS imm8 m256 ymm +// Construct and append a VROUNDPS instruction to the active function. +// Operates on the global context. +func VROUNDPS(i, mxy, xy operand.Op) { ctx.VROUNDPS(i, mxy, xy) } + +// VROUNDSD: Round Scalar Double Precision Floating-Point Values. +// +// Forms: +// +// VROUNDSD imm8 xmm xmm xmm +// VROUNDSD imm8 m64 xmm xmm +// Construct and append a VROUNDSD instruction to the active function. +func (c *Context) VROUNDSD(i, mx, x, x1 operand.Op) { + if inst, err := x86.VROUNDSD(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VROUNDSD: Round Scalar Double Precision Floating-Point Values. +// +// Forms: +// +// VROUNDSD imm8 xmm xmm xmm +// VROUNDSD imm8 m64 xmm xmm +// Construct and append a VROUNDSD instruction to the active function. +// Operates on the global context. +func VROUNDSD(i, mx, x, x1 operand.Op) { ctx.VROUNDSD(i, mx, x, x1) } + +// VROUNDSS: Round Scalar Single Precision Floating-Point Values. +// +// Forms: +// +// VROUNDSS imm8 xmm xmm xmm +// VROUNDSS imm8 m32 xmm xmm +// Construct and append a VROUNDSS instruction to the active function. +func (c *Context) VROUNDSS(i, mx, x, x1 operand.Op) { + if inst, err := x86.VROUNDSS(i, mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VROUNDSS: Round Scalar Single Precision Floating-Point Values. +// +// Forms: +// +// VROUNDSS imm8 xmm xmm xmm +// VROUNDSS imm8 m32 xmm xmm +// Construct and append a VROUNDSS instruction to the active function. +// Operates on the global context. +func VROUNDSS(i, mx, x, x1 operand.Op) { ctx.VROUNDSS(i, mx, x, x1) } + +// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VRSQRTPS xmm xmm +// VRSQRTPS m128 xmm +// VRSQRTPS ymm ymm +// VRSQRTPS m256 ymm +// Construct and append a VRSQRTPS instruction to the active function. 
+func (c *Context) VRSQRTPS(mxy, xy operand.Op) { + if inst, err := x86.VRSQRTPS(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VRSQRTPS xmm xmm +// VRSQRTPS m128 xmm +// VRSQRTPS ymm ymm +// VRSQRTPS m256 ymm +// Construct and append a VRSQRTPS instruction to the active function. +// Operates on the global context. +func VRSQRTPS(mxy, xy operand.Op) { ctx.VRSQRTPS(mxy, xy) } + +// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VRSQRTSS xmm xmm xmm +// VRSQRTSS m32 xmm xmm +// Construct and append a VRSQRTSS instruction to the active function. +func (c *Context) VRSQRTSS(mx, x, x1 operand.Op) { + if inst, err := x86.VRSQRTSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VRSQRTSS xmm xmm xmm +// VRSQRTSS m32 xmm xmm +// Construct and append a VRSQRTSS instruction to the active function. +// Operates on the global context. +func VRSQRTSS(mx, x, x1 operand.Op) { ctx.VRSQRTSS(mx, x, x1) } + +// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VSHUFPD imm8 xmm xmm xmm +// VSHUFPD imm8 m128 xmm xmm +// VSHUFPD imm8 ymm ymm ymm +// VSHUFPD imm8 m256 ymm ymm +// Construct and append a VSHUFPD instruction to the active function. +func (c *Context) VSHUFPD(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VSHUFPD(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VSHUFPD imm8 xmm xmm xmm +// VSHUFPD imm8 m128 xmm xmm +// VSHUFPD imm8 ymm ymm ymm +// VSHUFPD imm8 m256 ymm ymm +// Construct and append a VSHUFPD instruction to the active function. +// Operates on the global context. +func VSHUFPD(i, mxy, xy, xy1 operand.Op) { ctx.VSHUFPD(i, mxy, xy, xy1) } + +// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSHUFPS imm8 xmm xmm xmm +// VSHUFPS imm8 m128 xmm xmm +// VSHUFPS imm8 ymm ymm ymm +// VSHUFPS imm8 m256 ymm ymm +// Construct and append a VSHUFPS instruction to the active function. +func (c *Context) VSHUFPS(i, mxy, xy, xy1 operand.Op) { + if inst, err := x86.VSHUFPS(i, mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSHUFPS imm8 xmm xmm xmm +// VSHUFPS imm8 m128 xmm xmm +// VSHUFPS imm8 ymm ymm ymm +// VSHUFPS imm8 m256 ymm ymm +// Construct and append a VSHUFPS instruction to the active function. +// Operates on the global context. +func VSHUFPS(i, mxy, xy, xy1 operand.Op) { ctx.VSHUFPS(i, mxy, xy, xy1) } + +// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VSQRTPD xmm xmm +// VSQRTPD m128 xmm +// VSQRTPD ymm ymm +// VSQRTPD m256 ymm +// Construct and append a VSQRTPD instruction to the active function. +func (c *Context) VSQRTPD(mxy, xy operand.Op) { + if inst, err := x86.VSQRTPD(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VSQRTPD xmm xmm +// VSQRTPD m128 xmm +// VSQRTPD ymm ymm +// VSQRTPD m256 ymm +// Construct and append a VSQRTPD instruction to the active function. +// Operates on the global context. +func VSQRTPD(mxy, xy operand.Op) { ctx.VSQRTPD(mxy, xy) } + +// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSQRTPS xmm xmm +// VSQRTPS m128 xmm +// VSQRTPS ymm ymm +// VSQRTPS m256 ymm +// Construct and append a VSQRTPS instruction to the active function. +func (c *Context) VSQRTPS(mxy, xy operand.Op) { + if inst, err := x86.VSQRTPS(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSQRTPS xmm xmm +// VSQRTPS m128 xmm +// VSQRTPS ymm ymm +// VSQRTPS m256 ymm +// Construct and append a VSQRTPS instruction to the active function. +// Operates on the global context. +func VSQRTPS(mxy, xy operand.Op) { ctx.VSQRTPS(mxy, xy) } + +// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VSQRTSD xmm xmm xmm +// VSQRTSD m64 xmm xmm +// Construct and append a VSQRTSD instruction to the active function. +func (c *Context) VSQRTSD(mx, x, x1 operand.Op) { + if inst, err := x86.VSQRTSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VSQRTSD xmm xmm xmm +// VSQRTSD m64 xmm xmm +// Construct and append a VSQRTSD instruction to the active function. +// Operates on the global context. +func VSQRTSD(mx, x, x1 operand.Op) { ctx.VSQRTSD(mx, x, x1) } + +// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VSQRTSS xmm xmm xmm +// VSQRTSS m32 xmm xmm +// Construct and append a VSQRTSS instruction to the active function. +func (c *Context) VSQRTSS(mx, x, x1 operand.Op) { + if inst, err := x86.VSQRTSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VSQRTSS xmm xmm xmm +// VSQRTSS m32 xmm xmm +// Construct and append a VSQRTSS instruction to the active function. +// Operates on the global context. +func VSQRTSS(mx, x, x1 operand.Op) { ctx.VSQRTSS(mx, x, x1) } + +// VSTMXCSR: Store MXCSR Register State. +// +// Forms: +// +// VSTMXCSR m32 +// Construct and append a VSTMXCSR instruction to the active function. +func (c *Context) VSTMXCSR(m operand.Op) { + if inst, err := x86.VSTMXCSR(m); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSTMXCSR: Store MXCSR Register State. +// +// Forms: +// +// VSTMXCSR m32 +// Construct and append a VSTMXCSR instruction to the active function. +// Operates on the global context. +func VSTMXCSR(m operand.Op) { ctx.VSTMXCSR(m) } + +// VSUBPD: Subtract Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VSUBPD xmm xmm xmm +// VSUBPD m128 xmm xmm +// VSUBPD ymm ymm ymm +// VSUBPD m256 ymm ymm +// Construct and append a VSUBPD instruction to the active function. +func (c *Context) VSUBPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VSUBPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSUBPD: Subtract Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VSUBPD xmm xmm xmm +// VSUBPD m128 xmm xmm +// VSUBPD ymm ymm ymm +// VSUBPD m256 ymm ymm +// Construct and append a VSUBPD instruction to the active function. +// Operates on the global context. +func VSUBPD(mxy, xy, xy1 operand.Op) { ctx.VSUBPD(mxy, xy, xy1) } + +// VSUBPS: Subtract Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSUBPS xmm xmm xmm +// VSUBPS m128 xmm xmm +// VSUBPS ymm ymm ymm +// VSUBPS m256 ymm ymm +// Construct and append a VSUBPS instruction to the active function. +func (c *Context) VSUBPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VSUBPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSUBPS: Subtract Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSUBPS xmm xmm xmm +// VSUBPS m128 xmm xmm +// VSUBPS ymm ymm ymm +// VSUBPS m256 ymm ymm +// Construct and append a VSUBPS instruction to the active function. +// Operates on the global context. +func VSUBPS(mxy, xy, xy1 operand.Op) { ctx.VSUBPS(mxy, xy, xy1) } + +// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VSUBSD xmm xmm xmm +// VSUBSD m64 xmm xmm +// Construct and append a VSUBSD instruction to the active function. +func (c *Context) VSUBSD(mx, x, x1 operand.Op) { + if inst, err := x86.VSUBSD(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VSUBSD xmm xmm xmm +// VSUBSD m64 xmm xmm +// Construct and append a VSUBSD instruction to the active function. +// Operates on the global context. +func VSUBSD(mx, x, x1 operand.Op) { ctx.VSUBSD(mx, x, x1) } + +// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VSUBSS xmm xmm xmm +// VSUBSS m32 xmm xmm +// Construct and append a VSUBSS instruction to the active function. +func (c *Context) VSUBSS(mx, x, x1 operand.Op) { + if inst, err := x86.VSUBSS(mx, x, x1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VSUBSS xmm xmm xmm +// VSUBSS m32 xmm xmm +// Construct and append a VSUBSS instruction to the active function. +// Operates on the global context. +func VSUBSS(mx, x, x1 operand.Op) { ctx.VSUBSS(mx, x, x1) } + +// VTESTPD: Packed Double-Precision Floating-Point Bit Test. +// +// Forms: +// +// VTESTPD xmm xmm +// VTESTPD m128 xmm +// VTESTPD ymm ymm +// VTESTPD m256 ymm +// Construct and append a VTESTPD instruction to the active function. +func (c *Context) VTESTPD(mxy, xy operand.Op) { + if inst, err := x86.VTESTPD(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VTESTPD: Packed Double-Precision Floating-Point Bit Test. +// +// Forms: +// +// VTESTPD xmm xmm +// VTESTPD m128 xmm +// VTESTPD ymm ymm +// VTESTPD m256 ymm +// Construct and append a VTESTPD instruction to the active function. +// Operates on the global context. +func VTESTPD(mxy, xy operand.Op) { ctx.VTESTPD(mxy, xy) } + +// VTESTPS: Packed Single-Precision Floating-Point Bit Test. +// +// Forms: +// +// VTESTPS xmm xmm +// VTESTPS m128 xmm +// VTESTPS ymm ymm +// VTESTPS m256 ymm +// Construct and append a VTESTPS instruction to the active function. 
+func (c *Context) VTESTPS(mxy, xy operand.Op) { + if inst, err := x86.VTESTPS(mxy, xy); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VTESTPS: Packed Single-Precision Floating-Point Bit Test. +// +// Forms: +// +// VTESTPS xmm xmm +// VTESTPS m128 xmm +// VTESTPS ymm ymm +// VTESTPS m256 ymm +// Construct and append a VTESTPS instruction to the active function. +// Operates on the global context. +func VTESTPS(mxy, xy operand.Op) { ctx.VTESTPS(mxy, xy) } + +// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VUCOMISD xmm xmm +// VUCOMISD m64 xmm +// Construct and append a VUCOMISD instruction to the active function. +func (c *Context) VUCOMISD(mx, x operand.Op) { + if inst, err := x86.VUCOMISD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VUCOMISD xmm xmm +// VUCOMISD m64 xmm +// Construct and append a VUCOMISD instruction to the active function. +// Operates on the global context. +func VUCOMISD(mx, x operand.Op) { ctx.VUCOMISD(mx, x) } + +// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VUCOMISS xmm xmm +// VUCOMISS m32 xmm +// Construct and append a VUCOMISS instruction to the active function. +func (c *Context) VUCOMISS(mx, x operand.Op) { + if inst, err := x86.VUCOMISS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VUCOMISS xmm xmm +// VUCOMISS m32 xmm +// Construct and append a VUCOMISS instruction to the active function. +// Operates on the global context. +func VUCOMISS(mx, x operand.Op) { ctx.VUCOMISS(mx, x) } + +// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKHPD xmm xmm xmm +// VUNPCKHPD m128 xmm xmm +// VUNPCKHPD ymm ymm ymm +// VUNPCKHPD m256 ymm ymm +// Construct and append a VUNPCKHPD instruction to the active function. +func (c *Context) VUNPCKHPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VUNPCKHPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKHPD xmm xmm xmm +// VUNPCKHPD m128 xmm xmm +// VUNPCKHPD ymm ymm ymm +// VUNPCKHPD m256 ymm ymm +// Construct and append a VUNPCKHPD instruction to the active function. +// Operates on the global context. +func VUNPCKHPD(mxy, xy, xy1 operand.Op) { ctx.VUNPCKHPD(mxy, xy, xy1) } + +// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKHPS xmm xmm xmm +// VUNPCKHPS m128 xmm xmm +// VUNPCKHPS ymm ymm ymm +// VUNPCKHPS m256 ymm ymm +// Construct and append a VUNPCKHPS instruction to the active function. +func (c *Context) VUNPCKHPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VUNPCKHPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKHPS xmm xmm xmm +// VUNPCKHPS m128 xmm xmm +// VUNPCKHPS ymm ymm ymm +// VUNPCKHPS m256 ymm ymm +// Construct and append a VUNPCKHPS instruction to the active function. 
+// Operates on the global context. +func VUNPCKHPS(mxy, xy, xy1 operand.Op) { ctx.VUNPCKHPS(mxy, xy, xy1) } + +// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKLPD xmm xmm xmm +// VUNPCKLPD m128 xmm xmm +// VUNPCKLPD ymm ymm ymm +// VUNPCKLPD m256 ymm ymm +// Construct and append a VUNPCKLPD instruction to the active function. +func (c *Context) VUNPCKLPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VUNPCKLPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKLPD xmm xmm xmm +// VUNPCKLPD m128 xmm xmm +// VUNPCKLPD ymm ymm ymm +// VUNPCKLPD m256 ymm ymm +// Construct and append a VUNPCKLPD instruction to the active function. +// Operates on the global context. +func VUNPCKLPD(mxy, xy, xy1 operand.Op) { ctx.VUNPCKLPD(mxy, xy, xy1) } + +// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKLPS xmm xmm xmm +// VUNPCKLPS m128 xmm xmm +// VUNPCKLPS ymm ymm ymm +// VUNPCKLPS m256 ymm ymm +// Construct and append a VUNPCKLPS instruction to the active function. +func (c *Context) VUNPCKLPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VUNPCKLPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKLPS xmm xmm xmm +// VUNPCKLPS m128 xmm xmm +// VUNPCKLPS ymm ymm ymm +// VUNPCKLPS m256 ymm ymm +// Construct and append a VUNPCKLPS instruction to the active function. +// Operates on the global context. +func VUNPCKLPS(mxy, xy, xy1 operand.Op) { ctx.VUNPCKLPS(mxy, xy, xy1) } + +// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values. +// +// Forms: +// +// VXORPD xmm xmm xmm +// VXORPD m128 xmm xmm +// VXORPD ymm ymm ymm +// VXORPD m256 ymm ymm +// Construct and append a VXORPD instruction to the active function. +func (c *Context) VXORPD(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VXORPD(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values. +// +// Forms: +// +// VXORPD xmm xmm xmm +// VXORPD m128 xmm xmm +// VXORPD ymm ymm ymm +// VXORPD m256 ymm ymm +// Construct and append a VXORPD instruction to the active function. +// Operates on the global context. +func VXORPD(mxy, xy, xy1 operand.Op) { ctx.VXORPD(mxy, xy, xy1) } + +// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values. +// +// Forms: +// +// VXORPS xmm xmm xmm +// VXORPS m128 xmm xmm +// VXORPS ymm ymm ymm +// VXORPS m256 ymm ymm +// Construct and append a VXORPS instruction to the active function. +func (c *Context) VXORPS(mxy, xy, xy1 operand.Op) { + if inst, err := x86.VXORPS(mxy, xy, xy1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values. +// +// Forms: +// +// VXORPS xmm xmm xmm +// VXORPS m128 xmm xmm +// VXORPS ymm ymm ymm +// VXORPS m256 ymm ymm +// Construct and append a VXORPS instruction to the active function. +// Operates on the global context. +func VXORPS(mxy, xy, xy1 operand.Op) { ctx.VXORPS(mxy, xy, xy1) } + +// VZEROALL: Zero All YMM Registers. 
+// +// Forms: +// +// VZEROALL +// Construct and append a VZEROALL instruction to the active function. +func (c *Context) VZEROALL() { + if inst, err := x86.VZEROALL(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VZEROALL: Zero All YMM Registers. +// +// Forms: +// +// VZEROALL +// Construct and append a VZEROALL instruction to the active function. +// Operates on the global context. +func VZEROALL() { ctx.VZEROALL() } + +// VZEROUPPER: Zero Upper Bits of YMM Registers. +// +// Forms: +// +// VZEROUPPER +// Construct and append a VZEROUPPER instruction to the active function. +func (c *Context) VZEROUPPER() { + if inst, err := x86.VZEROUPPER(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// VZEROUPPER: Zero Upper Bits of YMM Registers. +// +// Forms: +// +// VZEROUPPER +// Construct and append a VZEROUPPER instruction to the active function. +// Operates on the global context. +func VZEROUPPER() { ctx.VZEROUPPER() } + +// XADDB: Exchange and Add. +// +// Forms: +// +// XADDB r8 r8 +// XADDB r8 m8 +// Construct and append a XADDB instruction to the active function. +func (c *Context) XADDB(r, mr operand.Op) { + if inst, err := x86.XADDB(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XADDB: Exchange and Add. +// +// Forms: +// +// XADDB r8 r8 +// XADDB r8 m8 +// Construct and append a XADDB instruction to the active function. +// Operates on the global context. +func XADDB(r, mr operand.Op) { ctx.XADDB(r, mr) } + +// XADDL: Exchange and Add. +// +// Forms: +// +// XADDL r32 r32 +// XADDL r32 m32 +// Construct and append a XADDL instruction to the active function. +func (c *Context) XADDL(r, mr operand.Op) { + if inst, err := x86.XADDL(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XADDL: Exchange and Add. +// +// Forms: +// +// XADDL r32 r32 +// XADDL r32 m32 +// Construct and append a XADDL instruction to the active function. +// Operates on the global context. +func XADDL(r, mr operand.Op) { ctx.XADDL(r, mr) } + +// XADDQ: Exchange and Add. +// +// Forms: +// +// XADDQ r64 r64 +// XADDQ r64 m64 +// Construct and append a XADDQ instruction to the active function. +func (c *Context) XADDQ(r, mr operand.Op) { + if inst, err := x86.XADDQ(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XADDQ: Exchange and Add. +// +// Forms: +// +// XADDQ r64 r64 +// XADDQ r64 m64 +// Construct and append a XADDQ instruction to the active function. +// Operates on the global context. +func XADDQ(r, mr operand.Op) { ctx.XADDQ(r, mr) } + +// XADDW: Exchange and Add. +// +// Forms: +// +// XADDW r16 r16 +// XADDW r16 m16 +// Construct and append a XADDW instruction to the active function. +func (c *Context) XADDW(r, mr operand.Op) { + if inst, err := x86.XADDW(r, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XADDW: Exchange and Add. +// +// Forms: +// +// XADDW r16 r16 +// XADDW r16 m16 +// Construct and append a XADDW instruction to the active function. +// Operates on the global context. +func XADDW(r, mr operand.Op) { ctx.XADDW(r, mr) } + +// XCHGB: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGB r8 r8 +// XCHGB m8 r8 +// XCHGB r8 m8 +// Construct and append a XCHGB instruction to the active function. 
+func (c *Context) XCHGB(mr, mr1 operand.Op) { + if inst, err := x86.XCHGB(mr, mr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XCHGB: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGB r8 r8 +// XCHGB m8 r8 +// XCHGB r8 m8 +// Construct and append a XCHGB instruction to the active function. +// Operates on the global context. +func XCHGB(mr, mr1 operand.Op) { ctx.XCHGB(mr, mr1) } + +// XCHGL: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGL r32 eax +// XCHGL eax r32 +// XCHGL r32 r32 +// XCHGL m32 r32 +// XCHGL r32 m32 +// Construct and append a XCHGL instruction to the active function. +func (c *Context) XCHGL(emr, emr1 operand.Op) { + if inst, err := x86.XCHGL(emr, emr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XCHGL: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGL r32 eax +// XCHGL eax r32 +// XCHGL r32 r32 +// XCHGL m32 r32 +// XCHGL r32 m32 +// Construct and append a XCHGL instruction to the active function. +// Operates on the global context. +func XCHGL(emr, emr1 operand.Op) { ctx.XCHGL(emr, emr1) } + +// XCHGQ: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGQ r64 rax +// XCHGQ rax r64 +// XCHGQ r64 r64 +// XCHGQ m64 r64 +// XCHGQ r64 m64 +// Construct and append a XCHGQ instruction to the active function. +func (c *Context) XCHGQ(mr, mr1 operand.Op) { + if inst, err := x86.XCHGQ(mr, mr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XCHGQ: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGQ r64 rax +// XCHGQ rax r64 +// XCHGQ r64 r64 +// XCHGQ m64 r64 +// XCHGQ r64 m64 +// Construct and append a XCHGQ instruction to the active function. +// Operates on the global context. +func XCHGQ(mr, mr1 operand.Op) { ctx.XCHGQ(mr, mr1) } + +// XCHGW: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGW r16 ax +// XCHGW ax r16 +// XCHGW r16 r16 +// XCHGW m16 r16 +// XCHGW r16 m16 +// Construct and append a XCHGW instruction to the active function. +func (c *Context) XCHGW(amr, amr1 operand.Op) { + if inst, err := x86.XCHGW(amr, amr1); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XCHGW: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGW r16 ax +// XCHGW ax r16 +// XCHGW r16 r16 +// XCHGW m16 r16 +// XCHGW r16 m16 +// Construct and append a XCHGW instruction to the active function. +// Operates on the global context. +func XCHGW(amr, amr1 operand.Op) { ctx.XCHGW(amr, amr1) } + +// XGETBV: Get Value of Extended Control Register. +// +// Forms: +// +// XGETBV +// Construct and append a XGETBV instruction to the active function. +func (c *Context) XGETBV() { + if inst, err := x86.XGETBV(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XGETBV: Get Value of Extended Control Register. +// +// Forms: +// +// XGETBV +// Construct and append a XGETBV instruction to the active function. +// Operates on the global context. +func XGETBV() { ctx.XGETBV() } + +// XLAT: Table Look-up Translation. +// +// Forms: +// +// XLAT +// Construct and append a XLAT instruction to the active function. +func (c *Context) XLAT() { + if inst, err := x86.XLAT(); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XLAT: Table Look-up Translation. +// +// Forms: +// +// XLAT +// Construct and append a XLAT instruction to the active function. +// Operates on the global context. 
+func XLAT() { ctx.XLAT() } + +// XORB: Logical Exclusive OR. +// +// Forms: +// +// XORB imm8 al +// XORB imm8 r8 +// XORB r8 r8 +// XORB m8 r8 +// XORB imm8 m8 +// XORB r8 m8 +// Construct and append a XORB instruction to the active function. +func (c *Context) XORB(imr, amr operand.Op) { + if inst, err := x86.XORB(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XORB: Logical Exclusive OR. +// +// Forms: +// +// XORB imm8 al +// XORB imm8 r8 +// XORB r8 r8 +// XORB m8 r8 +// XORB imm8 m8 +// XORB r8 m8 +// Construct and append a XORB instruction to the active function. +// Operates on the global context. +func XORB(imr, amr operand.Op) { ctx.XORB(imr, amr) } + +// XORL: Logical Exclusive OR. +// +// Forms: +// +// XORL imm32 eax +// XORL imm8 r32 +// XORL imm32 r32 +// XORL r32 r32 +// XORL m32 r32 +// XORL imm8 m32 +// XORL imm32 m32 +// XORL r32 m32 +// Construct and append a XORL instruction to the active function. +func (c *Context) XORL(imr, emr operand.Op) { + if inst, err := x86.XORL(imr, emr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XORL: Logical Exclusive OR. +// +// Forms: +// +// XORL imm32 eax +// XORL imm8 r32 +// XORL imm32 r32 +// XORL r32 r32 +// XORL m32 r32 +// XORL imm8 m32 +// XORL imm32 m32 +// XORL r32 m32 +// Construct and append a XORL instruction to the active function. +// Operates on the global context. +func XORL(imr, emr operand.Op) { ctx.XORL(imr, emr) } + +// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values. +// +// Forms: +// +// XORPD xmm xmm +// XORPD m128 xmm +// Construct and append a XORPD instruction to the active function. +func (c *Context) XORPD(mx, x operand.Op) { + if inst, err := x86.XORPD(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values. +// +// Forms: +// +// XORPD xmm xmm +// XORPD m128 xmm +// Construct and append a XORPD instruction to the active function. +// Operates on the global context. +func XORPD(mx, x operand.Op) { ctx.XORPD(mx, x) } + +// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values. +// +// Forms: +// +// XORPS xmm xmm +// XORPS m128 xmm +// Construct and append a XORPS instruction to the active function. +func (c *Context) XORPS(mx, x operand.Op) { + if inst, err := x86.XORPS(mx, x); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values. +// +// Forms: +// +// XORPS xmm xmm +// XORPS m128 xmm +// Construct and append a XORPS instruction to the active function. +// Operates on the global context. +func XORPS(mx, x operand.Op) { ctx.XORPS(mx, x) } + +// XORQ: Logical Exclusive OR. +// +// Forms: +// +// XORQ imm32 rax +// XORQ imm8 r64 +// XORQ imm32 r64 +// XORQ r64 r64 +// XORQ m64 r64 +// XORQ imm8 m64 +// XORQ imm32 m64 +// XORQ r64 m64 +// Construct and append a XORQ instruction to the active function. +func (c *Context) XORQ(imr, mr operand.Op) { + if inst, err := x86.XORQ(imr, mr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XORQ: Logical Exclusive OR. +// +// Forms: +// +// XORQ imm32 rax +// XORQ imm8 r64 +// XORQ imm32 r64 +// XORQ r64 r64 +// XORQ m64 r64 +// XORQ imm8 m64 +// XORQ imm32 m64 +// XORQ r64 m64 +// Construct and append a XORQ instruction to the active function. +// Operates on the global context. 
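The constructors in this generated file come in pairs: a method on build.Context that validates operands through the x86 package and records any error with adderror, and a package-level wrapper that forwards to the global ctx. A minimal sketch of a generator program driving the global-context form, assuming the README-style avo API at this revision (the function name Xor and its signature are illustrative, not part of this change):

// +build ignore

package main

import (
	. "github.com/mmcloughlin/avo/build"
)

func main() {
	TEXT("Xor", NOSPLIT, "func(x, y uint64) uint64")
	Doc("Xor returns x ^ y.")
	x := Load(Param("x"), GP64())
	y := Load(Param("y"), GP64())
	XORQ(x, y) // appends to the global Context; operand errors are accumulated, not returned
	Store(y, ReturnIndex(0))
	RET()
	Generate()
}

Any errors collected by the constructors surface later, when Generate processes the global context.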
+func XORQ(imr, mr operand.Op) { ctx.XORQ(imr, mr) } + +// XORW: Logical Exclusive OR. +// +// Forms: +// +// XORW imm16 ax +// XORW imm8 r16 +// XORW imm16 r16 +// XORW r16 r16 +// XORW m16 r16 +// XORW imm8 m16 +// XORW imm16 m16 +// XORW r16 m16 +// Construct and append a XORW instruction to the active function. +func (c *Context) XORW(imr, amr operand.Op) { + if inst, err := x86.XORW(imr, amr); err == nil { + c.Instruction(inst) + } else { + c.adderror(err) + } +} + +// XORW: Logical Exclusive OR. +// +// Forms: +// +// XORW imm16 ax +// XORW imm8 r16 +// XORW imm16 r16 +// XORW r16 r16 +// XORW m16 r16 +// XORW imm8 m16 +// XORW imm16 m16 +// XORW r16 m16 +// Construct and append a XORW instruction to the active function. +// Operates on the global context. +func XORW(imr, amr operand.Op) { ctx.XORW(imr, amr) } diff --git a/vendor/github.com/mmcloughlin/avo/build/zmov.go b/vendor/github.com/mmcloughlin/avo/build/zmov.go new file mode 100644 index 00000000..bfd9b081 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/build/zmov.go @@ -0,0 +1,71 @@ +// Code generated by command: avogen -output zmov.go mov. DO NOT EDIT. + +package build + +import ( + "github.com/mmcloughlin/avo/operand" + "go/types" +) + +func (c *Context) mov(a, b operand.Op, an, bn int, t *types.Basic) { + switch { + case (t.Info()&types.IsInteger) != 0 && an == 1 && bn == 1: + c.MOVB(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) == 0 && an == 1 && bn == 4: + c.MOVBLSX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) != 0 && an == 1 && bn == 4: + c.MOVBLZX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) == 0 && an == 1 && bn == 8: + c.MOVBQSX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) != 0 && an == 1 && bn == 8: + c.MOVBQZX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) == 0 && an == 1 && bn == 2: + c.MOVBWSX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) != 0 && an == 1 && bn == 2: + c.MOVBWZX(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 4 && bn == 4: + c.MOVL(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) == 0 && an == 4 && bn == 8: + c.MOVLQSX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) != 0 && an == 4 && bn == 8: + c.MOVLQZX(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 16 && bn == 16: + c.MOVOU(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 4 && bn == 16: + c.MOVQ(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 8 && bn == 8: + c.MOVQ(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 8 && bn == 16: + c.MOVQ(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 16 && bn == 4: + c.MOVQ(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 16 && bn == 8: + c.MOVQ(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 16 && bn == 16: + c.MOVQ(a, b) + case (t.Info()&types.IsFloat) != 0 && an == 8 && bn == 16: + c.MOVSD(a, b) + case (t.Info()&types.IsFloat) != 0 && an == 16 && bn == 8: + c.MOVSD(a, b) + case (t.Info()&types.IsFloat) != 0 && an == 16 && bn == 16: + c.MOVSD(a, b) + case (t.Info()&types.IsFloat) != 0 && an == 4 && bn == 16: + c.MOVSS(a, b) + case (t.Info()&types.IsFloat) != 0 && an == 16 && bn == 4: + c.MOVSS(a, b) + case (t.Info()&types.IsFloat) != 0 && an == 16 && bn == 16: + c.MOVSS(a, b) + case (t.Info()&types.IsInteger) != 0 && an == 2 && bn == 2: + c.MOVW(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) == 0 
&& an == 2 && bn == 4: + c.MOVWLSX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) != 0 && an == 2 && bn == 4: + c.MOVWLZX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) == 0 && an == 2 && bn == 8: + c.MOVWQSX(a, b) + case (t.Info()&types.IsInteger) != 0 && (t.Info()&types.IsUnsigned) != 0 && an == 2 && bn == 8: + c.MOVWQZX(a, b) + default: + c.adderrormessage("could not deduce mov instruction") + } +} diff --git a/vendor/github.com/mmcloughlin/avo/buildtags/buildtags.go b/vendor/github.com/mmcloughlin/avo/buildtags/buildtags.go new file mode 100644 index 00000000..8fd61e10 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/buildtags/buildtags.go @@ -0,0 +1,312 @@ +// Package buildtags provides types for representing and manipulating build constraints. +// +// In Go, build constraints are represented as comments in source code together with file naming conventions. For example +// +// // +build linux,386 darwin,!cgo +// // +build !purego +// +// Any terms provided in the filename can be thought of as an implicit extra +// constraint comment line. Collectively, these are referred to as +// ``constraints''. Each line is a ``constraint''. Within each constraint the +// space-separated terms are ``options'', and within that the comma-separated +// items are ``terms'' which may be negated with at most one exclaimation mark. +// +// These represent a boolean formulae. The constraints are evaluated as the AND +// of constraint lines; a constraint is evaluated as the OR of its options and +// an option is evaluated as the AND of its terms. Overall build constraints are +// a boolean formula that is an AND of ORs of ANDs. +// +// This level of complexity is rarely used in Go programs. Therefore this +// package aims to provide access to all these layers of nesting if required, +// but make it easy to forget about for basic use cases too. +package buildtags + +import ( + "errors" + "fmt" + "strings" + "unicode" +) + +// Reference: https://github.com/golang/go/blob/204a8f55dc2e0ac8d27a781dab0da609b98560da/src/go/build/doc.go#L73-L92 +// +// // A build constraint is evaluated as the OR of space-separated options; +// // each option evaluates as the AND of its comma-separated terms; +// // and each term is an alphanumeric word or, preceded by !, its negation. +// // That is, the build constraint: +// // +// // // +build linux,386 darwin,!cgo +// // +// // corresponds to the boolean formula: +// // +// // (linux AND 386) OR (darwin AND (NOT cgo)) +// // +// // A file may have multiple build constraints. The overall constraint is the AND +// // of the individual constraints. That is, the build constraints: +// // +// // // +build linux darwin +// // // +build 386 +// // +// // corresponds to the boolean formula: +// // +// // (linux OR darwin) AND 386 +// + +// Interface represents a build constraint. +type Interface interface { + ConstraintsConvertable + fmt.GoStringer + Evaluate(v map[string]bool) bool + Validate() error +} + +// ConstraintsConvertable can be converted to a Constraints object. +type ConstraintsConvertable interface { + ToConstraints() Constraints +} + +// ConstraintConvertable can be converted to a Constraint. +type ConstraintConvertable interface { + ToConstraint() Constraint +} + +// OptionConvertable can be converted to an Option. +type OptionConvertable interface { + ToOption() Option +} + +// Constraints represents the AND of a list of Constraint lines. 
+type Constraints []Constraint + +// And builds Constraints that will be true if all of its constraints are true. +func And(cs ...ConstraintConvertable) Constraints { + constraints := Constraints{} + for _, c := range cs { + constraints = append(constraints, c.ToConstraint()) + } + return constraints +} + +// ToConstraints returns cs. +func (cs Constraints) ToConstraints() Constraints { return cs } + +// Validate validates the constraints set. +func (cs Constraints) Validate() error { + for _, c := range cs { + if err := c.Validate(); err != nil { + return err + } + } + return nil +} + +// Evaluate the boolean formula represented by cs under the given assignment of +// tag values. This is the AND of the values of the constituent Constraints. +func (cs Constraints) Evaluate(v map[string]bool) bool { + r := true + for _, c := range cs { + r = r && c.Evaluate(v) + } + return r +} + +// GoString represents Constraints as +build comment lines. +func (cs Constraints) GoString() string { + s := "" + for _, c := range cs { + s += c.GoString() + } + return s +} + +// Constraint represents the OR of a list of Options. +type Constraint []Option + +// Any builds a Constraint that will be true if any of its options are true. +func Any(opts ...OptionConvertable) Constraint { + c := Constraint{} + for _, opt := range opts { + c = append(c, opt.ToOption()) + } + return c +} + +// ParseConstraint parses a space-separated list of options. +func ParseConstraint(expr string) (Constraint, error) { + c := Constraint{} + for _, field := range strings.Fields(expr) { + opt, err := ParseOption(field) + if err != nil { + return c, err + } + c = append(c, opt) + } + return c, nil +} + +// ToConstraints returns the list of constraints containing just c. +func (c Constraint) ToConstraints() Constraints { return Constraints{c} } + +// ToConstraint returns c. +func (c Constraint) ToConstraint() Constraint { return c } + +// Validate validates the constraint. +func (c Constraint) Validate() error { + for _, o := range c { + if err := o.Validate(); err != nil { + return err + } + } + return nil +} + +// Evaluate the boolean formula represented by c under the given assignment of +// tag values. This is the OR of the values of the constituent Options. +func (c Constraint) Evaluate(v map[string]bool) bool { + r := false + for _, o := range c { + r = r || o.Evaluate(v) + } + return r +} + +// GoString represents the Constraint as one +build comment line. +func (c Constraint) GoString() string { + s := "// +build" + for _, o := range c { + s += " " + o.GoString() + } + return s + "\n" +} + +// Option represents the AND of a list of Terms. +type Option []Term + +// Opt builds an Option from the list of Terms. +func Opt(terms ...Term) Option { + return Option(terms) +} + +// ParseOption parses a comma-separated list of terms. +func ParseOption(expr string) (Option, error) { + opt := Option{} + for _, t := range strings.Split(expr, ",") { + opt = append(opt, Term(t)) + } + return opt, opt.Validate() +} + +// ToConstraints returns Constraints containing just this option. +func (o Option) ToConstraints() Constraints { return o.ToConstraint().ToConstraints() } + +// ToConstraint returns a Constraint containing just this option. +func (o Option) ToConstraint() Constraint { return Constraint{o} } + +// ToOption returns o. +func (o Option) ToOption() Option { return o } + +// Validate validates o. 
+func (o Option) Validate() error { + for _, t := range o { + if err := t.Validate(); err != nil { + return fmt.Errorf("invalid term \"%s\": %s", t, err) + } + } + return nil +} + +// Evaluate the boolean formula represented by o under the given assignment of +// tag values. This is the AND of the values of the constituent Terms. +func (o Option) Evaluate(v map[string]bool) bool { + r := true + for _, t := range o { + r = r && t.Evaluate(v) + } + return r +} + +// GoString represents the Option as a comma-separated list of terms. +func (o Option) GoString() string { + var ts []string + for _, t := range o { + ts = append(ts, t.GoString()) + } + return strings.Join(ts, ",") +} + +// Term is an atomic term in a build constraint: an identifier or its negation. +type Term string + +// Not returns a term for the negation of ident. +func Not(ident string) Term { + return Term("!" + ident) +} + +// ToConstraints returns Constraints containing just this term. +func (t Term) ToConstraints() Constraints { return t.ToOption().ToConstraints() } + +// ToConstraint returns a Constraint containing just this term. +func (t Term) ToConstraint() Constraint { return t.ToOption().ToConstraint() } + +// ToOption returns an Option containing just this term. +func (t Term) ToOption() Option { return Option{t} } + +// IsNegated reports whether t is the negation of an identifier. +func (t Term) IsNegated() bool { return strings.HasPrefix(string(t), "!") } + +// Name returns the identifier for this term. +func (t Term) Name() string { + return strings.TrimPrefix(string(t), "!") +} + +// Validate the term. +func (t Term) Validate() error { + // Reference: https://github.com/golang/go/blob/204a8f55dc2e0ac8d27a781dab0da609b98560da/src/cmd/go/internal/imports/build.go#L110-L112 + // + // if strings.HasPrefix(name, "!!") { // bad syntax, reject always + // return false + // } + // + if strings.HasPrefix(string(t), "!!") { + return errors.New("at most one '!' allowed") + } + + if len(t.Name()) == 0 { + return errors.New("empty tag name") + } + + // Reference: https://github.com/golang/go/blob/204a8f55dc2e0ac8d27a781dab0da609b98560da/src/cmd/go/internal/imports/build.go#L121-L127 + // + // // Tags must be letters, digits, underscores or dots. + // // Unlike in Go identifiers, all digits are fine (e.g., "386"). + // for _, c := range name { + // if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + // return false + // } + // } + // + for _, c := range t.Name() { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + return fmt.Errorf("character '%c' disallowed in tags", c) + } + } + + return nil +} + +// Evaluate the term under the given set of identifier values. +func (t Term) Evaluate(v map[string]bool) bool { + return (t.Validate() == nil) && (v[t.Name()] == !t.IsNegated()) +} + +// GoString returns t. +func (t Term) GoString() string { return string(t) } + +// SetTags builds a set where the given list of identifiers are true. +func SetTags(idents ...string) map[string]bool { + v := map[string]bool{} + for _, ident := range idents { + v[ident] = true + } + return v +} diff --git a/vendor/github.com/mmcloughlin/avo/examples/stadtx/LICENSE b/vendor/github.com/mmcloughlin/avo/examples/stadtx/LICENSE new file mode 100644 index 00000000..bd92ca27 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/examples/stadtx/LICENSE @@ -0,0 +1,167 @@ +// Code generated by downloading from https://raw.githubusercontent.com/demerphq/BeagleHash/5f8620b953230e5b16171b745155fc3b0ef8f75e/LICENSE. 
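The constraint helpers in buildtags above compose directly. A minimal usage sketch (illustrative tag names; import path as vendored in this diff):

    package main

    import (
        "fmt"

        "github.com/mmcloughlin/avo/buildtags"
    )

    func main() {
        // (linux AND 386) OR (darwin AND (NOT cgo)), ANDed with !purego.
        cs := buildtags.And(
            buildtags.Any(
                buildtags.Opt(buildtags.Term("linux"), buildtags.Term("386")),
                buildtags.Opt(buildtags.Term("darwin"), buildtags.Not("cgo")),
            ),
            buildtags.Not("purego"),
        )
        if err := cs.Validate(); err != nil {
            panic(err)
        }
        fmt.Print(cs.GoString())                              // two "// +build" comment lines
        fmt.Println(cs.Evaluate(buildtags.SetTags("darwin"))) // true: darwin,!cgo and !purego both hold
    }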
DO NOT EDIT. + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/mmcloughlin/avo/gotypes/components.go b/vendor/github.com/mmcloughlin/avo/gotypes/components.go new file mode 100644 index 00000000..2206afa6 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/gotypes/components.go @@ -0,0 +1,253 @@ +package gotypes + +import ( + "errors" + "fmt" + "go/token" + "go/types" + "strconv" + + "github.com/mmcloughlin/avo/reg" + + "github.com/mmcloughlin/avo/operand" +) + +// Sizes provides type sizes used by the standard Go compiler on amd64. +var Sizes = types.SizesFor("gc", "amd64") + +// Basic represents a primitive/basic type at a given memory address. +type Basic struct { + Addr operand.Mem + Type *types.Basic +} + +// Component provides access to sub-components of a Go type. +type Component interface { + // When the component has no further sub-components, Resolve will return a + // reference to the components type and memory address. If there was an error + // during any previous calls to Component methods, they will be returned at + // resolution time. + Resolve() (*Basic, error) + + Dereference(r reg.Register) Component // dereference a pointer + Base() Component // base pointer of a string or slice + Len() Component // length of a string or slice + Cap() Component // capacity of a slice + Real() Component // real part of a complex value + Imag() Component // imaginary part of a complex value + Index(int) Component // index into an array + Field(string) Component // access a struct field +} + +// componenterr is an error that also provides a null implementation of the +// Component interface. This enables us to return an error from Component +// methods whilst also allowing method chaining to continue. +type componenterr string + +func errorf(format string, args ...interface{}) Component { + return componenterr(fmt.Sprintf(format, args...)) +} + +func (c componenterr) Error() string { return string(c) } +func (c componenterr) Resolve() (*Basic, error) { return nil, c } +func (c componenterr) Dereference(r reg.Register) Component { return c } +func (c componenterr) Base() Component { return c } +func (c componenterr) Len() Component { return c } +func (c componenterr) Cap() Component { return c } +func (c componenterr) Real() Component { return c } +func (c componenterr) Imag() Component { return c } +func (c componenterr) Index(int) Component { return c } +func (c componenterr) Field(string) Component { return c } + +type component struct { + typ types.Type + addr operand.Mem +} + +// NewComponent builds a component for the named type at the given address. 
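As a sketch of the chained-access pattern the Component interface above describes (the struct is built with go/types purely for illustration, and the parameter name "p" is hypothetical):

    package main

    import (
        "fmt"
        "go/token"
        "go/types"

        "github.com/mmcloughlin/avo/gotypes"
        "github.com/mmcloughlin/avo/operand"
    )

    func main() {
        // Describe struct{ A uint32; B uint64 } with go/types.
        fields := []*types.Var{
            types.NewField(token.NoPos, nil, "A", types.Typ[types.Uint32], false),
            types.NewField(token.NoPos, nil, "B", types.Typ[types.Uint64], false),
        }
        st := types.NewStruct(fields, nil)

        // Root the component at a hypothetical parameter "p" at offset 0.
        c := gotypes.NewComponent(st, operand.NewParamAddr("p", 0))

        // Errors from the chain are deferred until Resolve.
        b, err := c.Field("B").Resolve()
        if err != nil {
            panic(err)
        }
        fmt.Println(b.Addr.Asm(), b.Type) // p_B+8(FP) uint64, per amd64 field alignment
    }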
+func NewComponent(t types.Type, addr operand.Mem) Component { + return &component{ + typ: t, + addr: addr, + } +} + +func (c *component) Resolve() (*Basic, error) { + b := toprimitive(c.typ) + if b == nil { + return nil, errors.New("component is not primitive") + } + return &Basic{ + Addr: c.addr, + Type: b, + }, nil +} + +func (c *component) Dereference(r reg.Register) Component { + p, ok := c.typ.Underlying().(*types.Pointer) + if !ok { + return errorf("not pointer type") + } + return NewComponent(p.Elem(), operand.Mem{Base: r}) +} + +// Reference: https://github.com/golang/go/blob/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/src/reflect/value.go#L1800-L1804 +// +// type SliceHeader struct { +// Data uintptr +// Len int +// Cap int +// } +// +var slicehdroffsets = Sizes.Offsetsof([]*types.Var{ + types.NewField(token.NoPos, nil, "Data", types.Typ[types.Uintptr], false), + types.NewField(token.NoPos, nil, "Len", types.Typ[types.Int], false), + types.NewField(token.NoPos, nil, "Cap", types.Typ[types.Int], false), +}) + +func (c *component) Base() Component { + if !isslice(c.typ) && !isstring(c.typ) { + return errorf("only slices and strings have base pointers") + } + return c.sub("_base", int(slicehdroffsets[0]), types.Typ[types.Uintptr]) +} + +func (c *component) Len() Component { + if !isslice(c.typ) && !isstring(c.typ) { + return errorf("only slices and strings have length fields") + } + return c.sub("_len", int(slicehdroffsets[1]), types.Typ[types.Int]) +} + +func (c *component) Cap() Component { + if !isslice(c.typ) { + return errorf("only slices have capacity fields") + } + return c.sub("_cap", int(slicehdroffsets[2]), types.Typ[types.Int]) +} + +func (c *component) Real() Component { + if !iscomplex(c.typ) { + return errorf("only complex types have real values") + } + f := complextofloat(c.typ) + return c.sub("_real", 0, f) +} + +func (c *component) Imag() Component { + if !iscomplex(c.typ) { + return errorf("only complex types have imaginary values") + } + f := complextofloat(c.typ) + return c.sub("_imag", int(Sizes.Sizeof(f)), f) +} + +func (c *component) Index(i int) Component { + a, ok := c.typ.Underlying().(*types.Array) + if !ok { + return errorf("not array type") + } + if int64(i) >= a.Len() { + return errorf("array index out of bounds") + } + // Reference: https://github.com/golang/tools/blob/bcd4e47d02889ebbc25c9f4bf3d27e4124b0bf9d/go/analysis/passes/asmdecl/asmdecl.go#L482-L494 + // + // case asmArray: + // tu := t.Underlying().(*types.Array) + // elem := tu.Elem() + // // Calculate offset of each element array. 
+ // fields := []*types.Var{ + // types.NewVar(token.NoPos, nil, "fake0", elem), + // types.NewVar(token.NoPos, nil, "fake1", elem), + // } + // offsets := arch.sizes.Offsetsof(fields) + // elemoff := int(offsets[1]) + // for i := 0; i < int(tu.Len()); i++ { + // cc = appendComponentsRecursive(arch, elem, cc, suffix+"_"+strconv.Itoa(i), i*elemoff) + // } + // + elem := a.Elem() + elemsize := int(Sizes.Sizeof(types.NewArray(elem, 2)) - Sizes.Sizeof(types.NewArray(elem, 1))) + return c.sub("_"+strconv.Itoa(i), i*elemsize, elem) +} + +func (c *component) Field(n string) Component { + s, ok := c.typ.Underlying().(*types.Struct) + if !ok { + return errorf("not struct type") + } + // Reference: https://github.com/golang/tools/blob/13ba8ad772dfbf0f451b5dd0679e9c5605afc05d/go/analysis/passes/asmdecl/asmdecl.go#L471-L480 + // + // case asmStruct: + // tu := t.Underlying().(*types.Struct) + // fields := make([]*types.Var, tu.NumFields()) + // for i := 0; i < tu.NumFields(); i++ { + // fields[i] = tu.Field(i) + // } + // offsets := arch.sizes.Offsetsof(fields) + // for i, f := range fields { + // cc = appendComponentsRecursive(arch, f.Type(), cc, suffix+"_"+f.Name(), off+int(offsets[i])) + // } + // + fields := make([]*types.Var, s.NumFields()) + for i := 0; i < s.NumFields(); i++ { + fields[i] = s.Field(i) + } + offsets := Sizes.Offsetsof(fields) + for i, f := range fields { + if f.Name() == n { + return c.sub("_"+n, int(offsets[i]), f.Type()) + } + } + return errorf("struct does not have field '%s'", n) +} + +func (c *component) sub(suffix string, offset int, t types.Type) *component { + s := *c + if s.addr.Symbol.Name != "" { + s.addr.Symbol.Name += suffix + } + s.addr = s.addr.Offset(offset) + s.typ = t + return &s +} + +func isslice(t types.Type) bool { + _, ok := t.Underlying().(*types.Slice) + return ok +} + +func isstring(t types.Type) bool { + b, ok := t.Underlying().(*types.Basic) + return ok && b.Kind() == types.String +} + +func iscomplex(t types.Type) bool { + b, ok := t.Underlying().(*types.Basic) + return ok && (b.Info()&types.IsComplex) != 0 +} + +func complextofloat(t types.Type) types.Type { + switch Sizes.Sizeof(t) { + case 16: + return types.Typ[types.Float64] + case 8: + return types.Typ[types.Float32] + } + panic("bad") +} + +// toprimitive determines whether t is primitive (cannot be reduced into +// components). If it is, it returns the basic type for t, otherwise returns +// nil. +func toprimitive(t types.Type) *types.Basic { + switch b := t.(type) { + case *types.Basic: + if (b.Info() & (types.IsString | types.IsComplex)) == 0 { + return b + } + case *types.Pointer: + return types.Typ[types.Uintptr] + } + return nil +} diff --git a/vendor/github.com/mmcloughlin/avo/gotypes/doc.go b/vendor/github.com/mmcloughlin/avo/gotypes/doc.go new file mode 100644 index 00000000..fa8f0783 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/gotypes/doc.go @@ -0,0 +1,2 @@ +// Package gotypes provides helpers for interacting with Go types within avo functions. +package gotypes diff --git a/vendor/github.com/mmcloughlin/avo/gotypes/signature.go b/vendor/github.com/mmcloughlin/avo/gotypes/signature.go new file mode 100644 index 00000000..e0000203 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/gotypes/signature.go @@ -0,0 +1,177 @@ +package gotypes + +import ( + "bytes" + "errors" + "fmt" + "go/token" + "go/types" + "strconv" + + "github.com/mmcloughlin/avo/operand" +) + +// Signature represents a Go function signature. 
+type Signature struct { + pkg *types.Package + sig *types.Signature + params *Tuple + results *Tuple +} + +// NewSignature constructs a Signature. +func NewSignature(pkg *types.Package, sig *types.Signature) *Signature { + s := &Signature{ + pkg: pkg, + sig: sig, + } + s.init() + return s +} + +// NewSignatureVoid builds the void signature "func()". +func NewSignatureVoid() *Signature { + return NewSignature(nil, types.NewSignature(nil, nil, nil, false)) +} + +// LookupSignature returns the signature of the named function in the provided package. +func LookupSignature(pkg *types.Package, name string) (*Signature, error) { + scope := pkg.Scope() + obj := scope.Lookup(name) + if obj == nil { + return nil, fmt.Errorf("could not find function \"%s\"", name) + } + s, ok := obj.Type().(*types.Signature) + if !ok { + return nil, fmt.Errorf("object \"%s\" does not have signature type", name) + } + return NewSignature(pkg, s), nil +} + +// ParseSignature builds a Signature by parsing a Go function type expression. +// The function type must reference builtin types only; see +// ParseSignatureInPackage if custom types are required. +func ParseSignature(expr string) (*Signature, error) { + return ParseSignatureInPackage(nil, expr) +} + +// ParseSignatureInPackage builds a Signature by parsing a Go function type +// expression. The expression may reference types in the provided package. +func ParseSignatureInPackage(pkg *types.Package, expr string) (*Signature, error) { + tv, err := types.Eval(token.NewFileSet(), pkg, token.NoPos, expr) + if err != nil { + return nil, err + } + if tv.Value != nil { + return nil, errors.New("signature expression should have nil value") + } + s, ok := tv.Type.(*types.Signature) + if !ok { + return nil, errors.New("provided type is not a function signature") + } + return NewSignature(pkg, s), nil +} + +// Params returns the function signature argument types. +func (s *Signature) Params() *Tuple { return s.params } + +// Results returns the function return types. +func (s *Signature) Results() *Tuple { return s.results } + +// Bytes returns the total size of the function arguments and return values. +func (s *Signature) Bytes() int { return s.Params().Bytes() + s.Results().Bytes() } + +// String writes Signature as a string. This does not include the "func" keyword. +func (s *Signature) String() string { + var buf bytes.Buffer + types.WriteSignature(&buf, s.sig, func(pkg *types.Package) string { + if pkg == s.pkg { + return "" + } + return pkg.Name() + }) + return buf.String() +} + +func (s *Signature) init() { + p := s.sig.Params() + r := s.sig.Results() + + // Compute parameter offsets. + vs := tuplevars(p) + vs = append(vs, types.NewParam(token.NoPos, nil, "sentinel", types.Typ[types.Uint64])) + paramsoffsets := Sizes.Offsetsof(vs) + paramssize := paramsoffsets[p.Len()] + s.params = newTuple(p, paramsoffsets, paramssize, "arg") + + // Result offsets. + vs = tuplevars(r) + resultsoffsets := Sizes.Offsetsof(vs) + var resultssize int64 + if n := len(vs); n > 0 { + resultssize = resultsoffsets[n-1] + Sizes.Sizeof(vs[n-1].Type()) + } + for i := range resultsoffsets { + resultsoffsets[i] += paramssize + } + s.results = newTuple(r, resultsoffsets, resultssize, "ret") +} + +// Tuple represents a tuple of variables, such as function arguments or results. 
+type Tuple struct { + components []Component + byname map[string]Component + size int +} + +func newTuple(t *types.Tuple, offsets []int64, size int64, defaultprefix string) *Tuple { + tuple := &Tuple{ + byname: map[string]Component{}, + size: int(size), + } + for i := 0; i < t.Len(); i++ { + v := t.At(i) + name := v.Name() + if name == "" { + name = defaultprefix + if i > 0 { + name += strconv.Itoa(i) + } + } + addr := operand.NewParamAddr(name, int(offsets[i])) + c := NewComponent(v.Type(), addr) + tuple.components = append(tuple.components, c) + if v.Name() != "" { + tuple.byname[v.Name()] = c + } + } + return tuple +} + +// Lookup returns the variable with the given name. +func (t *Tuple) Lookup(name string) Component { + e := t.byname[name] + if e == nil { + return errorf("unknown variable \"%s\"", name) + } + return e +} + +// At returns the variable at index i. +func (t *Tuple) At(i int) Component { + if i >= len(t.components) { + return errorf("index out of range") + } + return t.components[i] +} + +// Bytes returns the size of the Tuple. This may include additional padding. +func (t *Tuple) Bytes() int { return t.size } + +func tuplevars(t *types.Tuple) []*types.Var { + vs := make([]*types.Var, t.Len()) + for i := 0; i < t.Len(); i++ { + vs[i] = t.At(i) + } + return vs +} diff --git a/vendor/github.com/mmcloughlin/avo/internal/data/LICENSE b/vendor/github.com/mmcloughlin/avo/internal/data/LICENSE new file mode 100644 index 00000000..baf24bb5 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/internal/data/LICENSE @@ -0,0 +1,52 @@ +----------------------------------------------------------------------------- +golang/arch license +----------------------------------------------------------------------------- + +Copyright (c) 2015 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
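A sketch of the signature helpers from signature.go above (a builtin-only function type expression, so no package context is required):

    package main

    import (
        "fmt"

        "github.com/mmcloughlin/avo/gotypes"
    )

    func main() {
        sig, err := gotypes.ParseSignature("func(x uint64, y uint64) uint64")
        if err != nil {
            panic(err)
        }
        fmt.Println(sig.String()) // (x uint64, y uint64) uint64
        fmt.Println(sig.Bytes())  // combined size of arguments and results in bytes

        // Named arguments are exposed as components addressed off the frame pointer.
        x, err := sig.Params().Lookup("x").Resolve()
        if err != nil {
            panic(err)
        }
        fmt.Println(x.Addr.Asm()) // x+0(FP)
    }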
+ +----------------------------------------------------------------------------- +Maratyszcza/Opcodes license +----------------------------------------------------------------------------- + +======================================= +Opcodes Database license (2-clause BSD) +======================================= + +Copyright (c) 2017 Facebook Inc. +Copyright (c) 2014-2017, Georgia Institute of Technology +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/mmcloughlin/avo/internal/load/testdata b/vendor/github.com/mmcloughlin/avo/internal/load/testdata new file mode 120000 index 00000000..4909e06e --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/internal/load/testdata @@ -0,0 +1 @@ +../data \ No newline at end of file diff --git a/vendor/github.com/mmcloughlin/avo/internal/opcodescsv/testdata b/vendor/github.com/mmcloughlin/avo/internal/opcodescsv/testdata new file mode 120000 index 00000000..4909e06e --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/internal/opcodescsv/testdata @@ -0,0 +1 @@ +../data \ No newline at end of file diff --git a/vendor/github.com/mmcloughlin/avo/internal/opcodesxml/testdata/x86_64.xml b/vendor/github.com/mmcloughlin/avo/internal/opcodesxml/testdata/x86_64.xml new file mode 120000 index 00000000..eb6040a0 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/internal/opcodesxml/testdata/x86_64.xml @@ -0,0 +1 @@ +../../data/x86_64.xml \ No newline at end of file diff --git a/vendor/github.com/mmcloughlin/avo/internal/prnt/printer.go b/vendor/github.com/mmcloughlin/avo/internal/prnt/printer.go new file mode 100644 index 00000000..2239946f --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/internal/prnt/printer.go @@ -0,0 +1,58 @@ +// Package prnt provides common functionality for code generators. +package prnt + +import ( + "bytes" + "fmt" + "io" +) + +// Generator provides convenience methods for code generators. In particular it +// provides fmt-like methods which print to an internal buffer. It also allows +// any errors to be stored so they can be checked at the end, rather than having +// error checks obscuring the code generation. +type Generator struct { + buf bytes.Buffer + err error +} + +// Raw provides direct access to the underlying output stream. 
+func (g *Generator) Raw() io.Writer { + return &g.buf +} + +// Printf prints to the internal buffer. +func (g *Generator) Printf(format string, args ...interface{}) { + if g.err != nil { + return + } + _, err := fmt.Fprintf(&g.buf, format, args...) + g.AddError(err) +} + +// NL prints a new line. +func (g *Generator) NL() { + g.Printf("\n") +} + +// Comment writes comment lines prefixed with "// ". +func (g *Generator) Comment(lines ...string) { + for _, line := range lines { + g.Printf("// %s\n", line) + } +} + +// AddError records an error in code generation. The first non-nil error will +// prevent printing operations from writing anything else, and the error will be +// returned from Result(). +func (g *Generator) AddError(err error) { + if err != nil && g.err == nil { + g.err = err + } +} + +// Result returns the printed bytes. If any error was recorded with AddError +// during code generation, the first such error will be returned here. +func (g *Generator) Result() ([]byte, error) { + return g.buf.Bytes(), g.err +} diff --git a/vendor/github.com/mmcloughlin/avo/internal/stack/stack.go b/vendor/github.com/mmcloughlin/avo/internal/stack/stack.go new file mode 100644 index 00000000..1d327d9d --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/internal/stack/stack.go @@ -0,0 +1,73 @@ +// Package stack provides helpers for querying the callstack. +package stack + +import ( + "path" + "runtime" + "strings" +) + +// Frames returns at most max callstack Frames, starting with its caller and +// skipping skip Frames. +func Frames(skip, max int) []runtime.Frame { + pc := make([]uintptr, max) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return nil + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + var fs []runtime.Frame + for { + f, more := frames.Next() + fs = append(fs, f) + if !more { + break + } + } + return fs +} + +// Match returns the first stack frame for which the predicate function returns +// true. Returns nil if no match is found. Starts matching after skip frames, +// starting with its caller. +func Match(skip int, predicate func(runtime.Frame) bool) *runtime.Frame { + i, n := skip+1, 16 + for { + fs := Frames(i, n) + for j, f := range fs { + if predicate(f) { + return &fs[j] + } + } + if len(fs) < n { + break + } + i += n + } + return nil +} + +// Main returns the main() function Frame. +func Main() *runtime.Frame { + return Match(1, func(f runtime.Frame) bool { + return f.Function == "main.main" + }) +} + +// ExternalCaller returns the first frame outside the callers package. +func ExternalCaller() *runtime.Frame { + var first *runtime.Frame + return Match(1, func(f runtime.Frame) bool { + if first == nil { + first = &f + } + return pkg(first.Function) != pkg(f.Function) + }) +} + +func pkg(ident string) string { + dir, name := path.Split(ident) + parts := strings.Split(name, ".") + return dir + parts[0] +} diff --git a/vendor/github.com/mmcloughlin/avo/ir/doc.go b/vendor/github.com/mmcloughlin/avo/ir/doc.go new file mode 100644 index 00000000..de02f464 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/ir/doc.go @@ -0,0 +1,2 @@ +// Package ir provides the intermediate representation of avo programs. 
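A sketch of the deferred-error pattern the Generator above encodes. Note that prnt is an internal package, so this snippet is only importable from code compiled inside the avo module itself:

    package main

    import (
        "fmt"

        "github.com/mmcloughlin/avo/internal/prnt" // internal: usable only within the avo module
    )

    func main() {
        var g prnt.Generator
        g.Comment("Code generated example.", "DO NOT EDIT.")
        g.NL()
        g.Printf("package %s\n", "example")

        // Any error recorded while printing surfaces once, here.
        out, err := g.Result()
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }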
+package ir diff --git a/vendor/github.com/mmcloughlin/avo/ir/ir.go b/vendor/github.com/mmcloughlin/avo/ir/ir.go new file mode 100644 index 00000000..8cf8e8e4 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/ir/ir.go @@ -0,0 +1,325 @@ +package ir + +import ( + "errors" + + "github.com/mmcloughlin/avo/attr" + "github.com/mmcloughlin/avo/buildtags" + "github.com/mmcloughlin/avo/gotypes" + "github.com/mmcloughlin/avo/operand" + "github.com/mmcloughlin/avo/reg" +) + +// Node is a part of a Function. +type Node interface { + node() +} + +// Label within a function. +type Label string + +func (l Label) node() {} + +// Comment represents a multi-line comment. +type Comment struct { + Lines []string +} + +func (c *Comment) node() {} + +// NewComment builds a Comment consisting of the provided lines. +func NewComment(lines ...string) *Comment { + return &Comment{ + Lines: lines, + } +} + +// Instruction is a single instruction in a function. +type Instruction struct { + Opcode string + Operands []operand.Op + + Inputs []operand.Op + Outputs []operand.Op + + IsTerminal bool + IsBranch bool + IsConditional bool + + // CFG. + Pred []*Instruction + Succ []*Instruction + + // LiveIn/LiveOut are sets of live register IDs pre/post execution. + LiveIn reg.Set + LiveOut reg.Set +} + +func (i *Instruction) node() {} + +// TargetLabel returns the label referenced by this instruction. Returns nil if +// no label is referenced. +func (i Instruction) TargetLabel() *Label { + if !i.IsBranch { + return nil + } + if len(i.Operands) == 0 { + return nil + } + if ref, ok := i.Operands[0].(operand.LabelRef); ok { + lbl := Label(ref) + return &lbl + } + return nil +} + +// Registers returns all registers involved in the instruction. +func (i Instruction) Registers() []reg.Register { + var rs []reg.Register + for _, op := range i.Operands { + rs = append(rs, operand.Registers(op)...) + } + return rs +} + +// InputRegisters returns all registers read by this instruction. +func (i Instruction) InputRegisters() []reg.Register { + var rs []reg.Register + for _, op := range i.Inputs { + rs = append(rs, operand.Registers(op)...) + } + for _, op := range i.Outputs { + if operand.IsMem(op) { + rs = append(rs, operand.Registers(op)...) + } + } + return rs +} + +// OutputRegisters returns all registers written by this instruction. +func (i Instruction) OutputRegisters() []reg.Register { + var rs []reg.Register + for _, op := range i.Outputs { + if r, ok := op.(reg.Register); ok { + rs = append(rs, r) + } + } + return rs +} + +// Section is a part of a file. +type Section interface { + section() +} + +// File represents an assembly file. +type File struct { + Constraints buildtags.Constraints + Includes []string + Sections []Section +} + +// NewFile initializes an empty file. +func NewFile() *File { + return &File{} +} + +// AddSection appends a Section to the file. +func (f *File) AddSection(s Section) { + f.Sections = append(f.Sections, s) +} + +// Functions returns all functions in the file. +func (f *File) Functions() []*Function { + var fns []*Function + for _, s := range f.Sections { + if fn, ok := s.(*Function); ok { + fns = append(fns, fn) + } + } + return fns +} + +// Function represents an assembly function. +type Function struct { + Name string + Attributes attr.Attribute + Doc []string + Signature *gotypes.Signature + LocalSize int + + Nodes []Node + + // LabelTarget maps from label name to the following instruction. + LabelTarget map[Label]*Instruction + + // Register allocation. 
+ Allocation reg.Allocation +} + +func (f *Function) section() {} + +// NewFunction builds an empty function of the given name. +func NewFunction(name string) *Function { + return &Function{ + Name: name, + Signature: gotypes.NewSignatureVoid(), + } +} + +// SetSignature sets the function signature. +func (f *Function) SetSignature(s *gotypes.Signature) { + f.Signature = s +} + +// AllocLocal allocates size bytes in this function's stack. +// Returns a reference to the base pointer for the newly allocated region. +func (f *Function) AllocLocal(size int) operand.Mem { + ptr := operand.NewStackAddr(f.LocalSize) + f.LocalSize += size + return ptr +} + +// AddInstruction appends an instruction to f. +func (f *Function) AddInstruction(i *Instruction) { + f.AddNode(i) +} + +// AddLabel appends a label to f. +func (f *Function) AddLabel(l Label) { + f.AddNode(l) +} + +// AddComment adds comment lines to f. +func (f *Function) AddComment(lines ...string) { + f.AddNode(NewComment(lines...)) +} + +// AddNode appends a Node to f. +func (f *Function) AddNode(n Node) { + f.Nodes = append(f.Nodes, n) +} + +// Instructions returns just the list of instruction nodes. +func (f *Function) Instructions() []*Instruction { + var is []*Instruction + for _, n := range f.Nodes { + i, ok := n.(*Instruction) + if ok { + is = append(is, i) + } + } + return is +} + +// Labels returns just the list of label nodes. +func (f *Function) Labels() []Label { + var lbls []Label + for _, n := range f.Nodes { + lbl, ok := n.(Label) + if ok { + lbls = append(lbls, lbl) + } + } + return lbls +} + +// Stub returns the Go function declaration. +func (f *Function) Stub() string { + return "func " + f.Name + f.Signature.String() +} + +// FrameBytes returns the size of the stack frame in bytes. +func (f *Function) FrameBytes() int { + return f.LocalSize +} + +// ArgumentBytes returns the size of the arguments in bytes. +func (f *Function) ArgumentBytes() int { + return f.Signature.Bytes() +} + +// Datum represents a data element at a particular offset of a data section. +type Datum struct { + Offset int + Value operand.Constant +} + +// NewDatum builds a Datum from the given constant. +func NewDatum(offset int, v operand.Constant) Datum { + return Datum{ + Offset: offset, + Value: v, + } +} + +// Interval returns the range of bytes this datum will occupy within its section. +func (d Datum) Interval() (int, int) { + return d.Offset, d.Offset + d.Value.Bytes() +} + +// Overlaps returns true +func (d Datum) Overlaps(other Datum) bool { + s, e := d.Interval() + so, eo := other.Interval() + return !(eo <= s || e <= so) +} + +// Global represents a DATA section. +type Global struct { + Symbol operand.Symbol + Attributes attr.Attribute + Data []Datum + Size int +} + +// NewGlobal constructs an empty DATA section. +func NewGlobal(sym operand.Symbol) *Global { + return &Global{ + Symbol: sym, + } +} + +// NewStaticGlobal is a convenience for building a static DATA section. +func NewStaticGlobal(name string) *Global { + return NewGlobal(operand.NewStaticSymbol(name)) +} + +func (g *Global) section() {} + +// Base returns a pointer to the start of the data section. +func (g *Global) Base() operand.Mem { + return operand.NewDataAddr(g.Symbol, 0) +} + +// Grow ensures that the data section has at least the given size. +func (g *Global) Grow(size int) { + if g.Size < size { + g.Size = size + } +} + +// AddDatum adds d to this data section, growing it if necessary. Errors if the datum overlaps with existing data. 
+func (g *Global) AddDatum(d Datum) error { + for _, other := range g.Data { + if d.Overlaps(other) { + return errors.New("overlaps existing datum") + } + } + g.add(d) + return nil +} + +// Append the constant to the end of the data section. +func (g *Global) Append(v operand.Constant) { + g.add(Datum{ + Offset: g.Size, + Value: v, + }) +} + +func (g *Global) add(d Datum) { + _, end := d.Interval() + g.Grow(end) + g.Data = append(g.Data, d) +} diff --git a/vendor/github.com/mmcloughlin/avo/operand/checks.go b/vendor/github.com/mmcloughlin/avo/operand/checks.go new file mode 100644 index 00000000..2585479d --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/operand/checks.go @@ -0,0 +1,247 @@ +package operand + +import "github.com/mmcloughlin/avo/reg" + +// Pure type assertion checks: + +// IsRegister returns whether op has type reg.Register. +func IsRegister(op Op) bool { _, ok := op.(reg.Register); return ok } + +// IsMem returns whether op has type Mem. +func IsMem(op Op) bool { _, ok := op.(Mem); return ok } + +// IsRel returns whether op has type Rel. +func IsRel(op Op) bool { _, ok := op.(Rel); return ok } + +// Checks corresponding to specific operand types in the Intel Manual: + +// Is1 returns true if op is the immediate constant 1. +func Is1(op Op) bool { + i, ok := op.(U8) + return ok && i == 1 +} + +// Is3 returns true if op is the immediate constant 3. +func Is3(op Op) bool { + i, ok := op.(U8) + return ok && i == 3 +} + +// IsIMM2U returns true if op is a 2-bit unsigned immediate (less than 4). +func IsIMM2U(op Op) bool { + i, ok := op.(U8) + return ok && i < 4 +} + +// IsIMM8 returns true is op is an 8-bit immediate. +func IsIMM8(op Op) bool { + _, ok := op.(U8) + return ok +} + +// IsIMM16 returns true is op is a 16-bit immediate. +func IsIMM16(op Op) bool { + _, ok := op.(U16) + return ok +} + +// IsIMM32 returns true is op is a 32-bit immediate. +func IsIMM32(op Op) bool { + _, ok := op.(U32) + return ok +} + +// IsIMM64 returns true is op is a 64-bit immediate. +func IsIMM64(op Op) bool { + _, ok := op.(U64) + return ok +} + +// IsAL returns true if op is the AL register. +func IsAL(op Op) bool { + return op == reg.AL +} + +// IsCL returns true if op is the CL register. +func IsCL(op Op) bool { + return op == reg.CL +} + +// IsAX returns true if op is the 16-bit AX register. +func IsAX(op Op) bool { + return op == reg.AX +} + +// IsEAX returns true if op is the 32-bit EAX register. +func IsEAX(op Op) bool { + return op == reg.EAX +} + +// IsRAX returns true if op is the 64-bit RAX register. +func IsRAX(op Op) bool { + return op == reg.RAX +} + +// IsR8 returns true if op is an 8-bit general-purpose register. +func IsR8(op Op) bool { + return IsGP(op, 1) +} + +// IsR16 returns true if op is a 16-bit general-purpose register. +func IsR16(op Op) bool { + return IsGP(op, 2) +} + +// IsR32 returns true if op is a 32-bit general-purpose register. +func IsR32(op Op) bool { + return IsGP(op, 4) +} + +// IsR64 returns true if op is a 64-bit general-purpose register. +func IsR64(op Op) bool { + return IsGP(op, 8) +} + +// IsPseudo returns true if op is a pseudo register. +func IsPseudo(op Op) bool { + return IsRegisterKind(op, reg.KindPseudo) +} + +// IsGP returns true if op is a general-purpose register of size n bytes. +func IsGP(op Op, n uint) bool { + return IsRegisterKindSize(op, reg.KindGP, n) +} + +// IsXMM0 returns true if op is the X0 register. +func IsXMM0(op Op) bool { + return op == reg.X0 +} + +// IsXMM returns true if op is a 128-bit XMM register. 
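A sketch of assembling the ir structures above by hand; real avo programs typically build these through the build package's Context rather than constructing them directly:

    package main

    import (
        "fmt"

        "github.com/mmcloughlin/avo/ir"
        "github.com/mmcloughlin/avo/operand"
    )

    func main() {
        f := ir.NewFunction("noop")
        f.AddComment("noop does nothing and returns.")
        f.AddLabel(ir.Label("done"))
        f.AddInstruction(&ir.Instruction{Opcode: "RET", IsTerminal: true})
        fmt.Println(f.Stub()) // func noop()

        // A DATA section with a single 8-byte constant appended at offset 0.
        g := ir.NewStaticGlobal("magic")
        g.Append(operand.U64(0xdeadbeef))
        fmt.Println(g.Size) // 8

        file := ir.NewFile()
        file.AddSection(f)
        file.AddSection(g)
        fmt.Println(len(file.Functions())) // 1
    }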
+func IsXMM(op Op) bool { + return IsRegisterKindSize(op, reg.KindVector, 16) +} + +// IsYMM returns true if op is a 256-bit YMM register. +func IsYMM(op Op) bool { + return IsRegisterKindSize(op, reg.KindVector, 32) +} + +// IsRegisterKindSize returns true if op is a register of the given kind and size in bytes. +func IsRegisterKindSize(op Op, k reg.Kind, n uint) bool { + r, ok := op.(reg.Register) + return ok && r.Kind() == k && r.Size() == n +} + +// IsRegisterKind returns true if op is a register of the given kind. +func IsRegisterKind(op Op, k reg.Kind) bool { + r, ok := op.(reg.Register) + return ok && r.Kind() == k +} + +// IsM returns true if op is a 16-, 32- or 64-bit memory operand. +func IsM(op Op) bool { + // TODO(mbm): confirm "m" check is defined correctly + // Intel manual: "A 16-, 32- or 64-bit operand in memory." + return IsM16(op) || IsM32(op) || IsM64(op) +} + +// IsM8 returns true if op is an 8-bit memory operand. +func IsM8(op Op) bool { + // TODO(mbm): confirm "m8" check is defined correctly + // Intel manual: "A byte operand in memory, usually expressed as a variable or + // array name, but pointed to by the DS:(E)SI or ES:(E)DI registers. In 64-bit + // mode, it is pointed to by the RSI or RDI registers." + return IsMSize(op, 1) +} + +// IsM16 returns true if op is a 16-bit memory operand. +func IsM16(op Op) bool { + return IsMSize(op, 2) +} + +// IsM32 returns true if op is a 16-bit memory operand. +func IsM32(op Op) bool { + return IsMSize(op, 4) +} + +// IsM64 returns true if op is a 64-bit memory operand. +func IsM64(op Op) bool { + return IsMSize(op, 8) +} + +// IsMSize returns true if op is a memory operand using general-purpose address +// registers of the given size in bytes. +func IsMSize(op Op, n uint) bool { + // TODO(mbm): should memory operands have a size attribute as well? + // TODO(mbm): m8,m16,m32,m64 checks do not actually check size + m, ok := op.(Mem) + return ok && IsMReg(m.Base) && (m.Index == nil || IsMReg(m.Index)) +} + +// IsMReg returns true if op is a register that can be used in a memory operand. +func IsMReg(op Op) bool { + return IsPseudo(op) || IsRegisterKind(op, reg.KindGP) +} + +// IsM128 returns true if op is a 128-bit memory operand. +func IsM128(op Op) bool { + // TODO(mbm): should "m128" be the same as "m64"? + return IsM64(op) +} + +// IsM256 returns true if op is a 256-bit memory operand. +func IsM256(op Op) bool { + // TODO(mbm): should "m256" be the same as "m64"? + return IsM64(op) +} + +// IsVM32X returns true if op is a vector memory operand with 32-bit XMM index. +func IsVM32X(op Op) bool { + return IsVmx(op) +} + +// IsVM64X returns true if op is a vector memory operand with 64-bit XMM index. +func IsVM64X(op Op) bool { + return IsVmx(op) +} + +// IsVmx returns true if op is a vector memory operand with XMM index. +func IsVmx(op Op) bool { + return isvm(op, IsXMM) +} + +// IsVM32Y returns true if op is a vector memory operand with 32-bit YMM index. +func IsVM32Y(op Op) bool { + return IsVmy(op) +} + +// IsVM64Y returns true if op is a vector memory operand with 64-bit YMM index. +func IsVM64Y(op Op) bool { + return IsVmy(op) +} + +// IsVmy returns true if op is a vector memory operand with YMM index. +func IsVmy(op Op) bool { + return isvm(op, IsYMM) +} + +func isvm(op Op, idx func(Op) bool) bool { + m, ok := op.(Mem) + return ok && IsR64(m.Base) && idx(m.Index) +} + +// IsREL8 returns true if op is an 8-bit offset relative to instruction pointer. 
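The predicates above map onto Intel-manual operand classes. A small sketch, assuming reg.RAX and reg.RCX as the package's amd64 general-purpose registers:

    package main

    import (
        "fmt"

        "github.com/mmcloughlin/avo/operand"
        "github.com/mmcloughlin/avo/reg"
    )

    func main() {
        fmt.Println(operand.IsR64(reg.RAX))        // true: 64-bit general-purpose register
        fmt.Println(operand.IsIMM8(operand.U8(7))) // true: 8-bit immediate
        fmt.Println(operand.Is1(operand.Imm(1)))   // true: Imm picks U8 for values that fit

        // A memory operand whose base and index are general-purpose registers.
        m := operand.Mem{Base: reg.RAX, Index: reg.RCX, Scale: 8}
        fmt.Println(operand.IsM64(m)) // true
    }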
+func IsREL8(op Op) bool { + r, ok := op.(Rel) + return ok && r == Rel(int8(r)) +} + +// IsREL32 returns true if op is an offset relative to instruction pointer, or a +// label reference. +func IsREL32(op Op) bool { + // TODO(mbm): should labels be considered separately? + _, rel := op.(Rel) + _, label := op.(LabelRef) + return rel || label +} diff --git a/vendor/github.com/mmcloughlin/avo/operand/const.go b/vendor/github.com/mmcloughlin/avo/operand/const.go new file mode 100644 index 00000000..b2c6a6f7 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/operand/const.go @@ -0,0 +1,36 @@ +package operand + +import "fmt" + +// Constant represents a constant literal. +type Constant interface { + Op + Bytes() int + constant() +} + +//go:generate go run make_const.go -output zconst.go + +// String is a string constant. +type String string + +// Asm returns an assembly syntax representation of the string s. +func (s String) Asm() string { return fmt.Sprintf("$%q", s) } + +// Bytes returns the length of s. +func (s String) Bytes() int { return len(s) } + +func (s String) constant() {} + +// Imm returns an unsigned integer constant with size guessed from x. +func Imm(x uint64) Constant { + switch { + case uint64(uint8(x)) == x: + return U8(x) + case uint64(uint16(x)) == x: + return U16(x) + case uint64(uint32(x)) == x: + return U32(x) + } + return U64(x) +} diff --git a/vendor/github.com/mmcloughlin/avo/operand/doc.go b/vendor/github.com/mmcloughlin/avo/operand/doc.go new file mode 100644 index 00000000..51c44dfb --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/operand/doc.go @@ -0,0 +1,2 @@ +// Package operand provides types for instruction operands. +package operand diff --git a/vendor/github.com/mmcloughlin/avo/operand/make_const.go b/vendor/github.com/mmcloughlin/avo/operand/make_const.go new file mode 100644 index 00000000..42262c73 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/operand/make_const.go @@ -0,0 +1,73 @@ +// +build ignore + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "path/filepath" + "runtime" + "strconv" +) + +var output = flag.String("output", "", "path to output file (default stdout)") + +func PrintConstType(w io.Writer, name, typ, format string, size int, doc string) { + r := typ[0] + fmt.Fprintf(w, "// %s\n", doc) + fmt.Fprintf(w, "type %s %s\n", name, typ) + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "func (%c %s) Asm() string { return fmt.Sprintf(\"$%s\", %c) }\n", r, name, format, r) + fmt.Fprintf(w, "func (%c %s) Bytes() int { return %d }\n", r, name, size) + fmt.Fprintf(w, "func (%c %s) constant() {}\n", r, name) + fmt.Fprintf(w, "\n") +} + +func PrintConstTypes(w io.Writer) { + _, self, _, _ := runtime.Caller(0) + fmt.Fprintf(w, "// Code generated by %s. 
DO NOT EDIT.\n\n", filepath.Base(self)) + fmt.Fprintf(w, "package operand\n\n") + fmt.Fprintf(w, "import \"fmt\"\n\n") + for n := 1; n <= 8; n *= 2 { + bits := n * 8 + bs := strconv.Itoa(bits) + + if n >= 4 { + PrintConstType(w, "F"+bs, "float"+bs, "(%#v)", n, fmt.Sprintf("F%d is a %d-bit floating point constant.", bits, bits)) + } + PrintConstType(w, "I"+bs, "int"+bs, "%+d", n, fmt.Sprintf("I%d is a %d-bit signed integer constant.", bits, bits)) + PrintConstType(w, "U"+bs, "uint"+bs, "%#0"+strconv.Itoa(2*n)+"x", n, fmt.Sprintf("U%d is a %d-bit unsigned integer constant.", bits, bits)) + } +} + +func main() { + flag.Parse() + + w := os.Stdout + if *output != "" { + f, err := os.Create(*output) + if err != nil { + log.Fatal(err) + } + defer f.Close() + w = f + } + + buf := bytes.NewBuffer(nil) + PrintConstTypes(buf) + + src, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + + _, err = w.Write(src) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/mmcloughlin/avo/operand/types.go b/vendor/github.com/mmcloughlin/avo/operand/types.go new file mode 100644 index 00000000..3ca07e4b --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/operand/types.go @@ -0,0 +1,151 @@ +package operand + +import ( + "fmt" + + "github.com/mmcloughlin/avo/reg" +) + +// Op is an operand. +type Op interface { + Asm() string +} + +// Symbol represents a symbol name. +type Symbol struct { + Name string + Static bool // only visible in current source file +} + +// NewStaticSymbol builds a static Symbol. Static symbols are only visible in the current source file. +func NewStaticSymbol(name string) Symbol { + return Symbol{Name: name, Static: true} +} + +func (s Symbol) String() string { + n := s.Name + if s.Static { + n += "<>" + } + return n +} + +// Mem represents a memory reference. +type Mem struct { + Symbol Symbol + Disp int + Base reg.Register + Index reg.Register + Scale uint8 +} + +// NewParamAddr is a convenience to build a Mem operand pointing to a function +// parameter, which is a named offset from the frame pointer pseudo register. +func NewParamAddr(name string, offset int) Mem { + return Mem{ + Symbol: Symbol{ + Name: name, + Static: false, + }, + Disp: offset, + Base: reg.FramePointer, + } +} + +// NewStackAddr returns a memory reference relative to the stack pointer. +func NewStackAddr(offset int) Mem { + return Mem{ + Disp: offset, + Base: reg.StackPointer, + } +} + +// NewDataAddr returns a memory reference relative to the named data symbol. +func NewDataAddr(sym Symbol, offset int) Mem { + return Mem{ + Symbol: sym, + Disp: offset, + Base: reg.StaticBase, + } +} + +// Offset returns a reference to m plus idx bytes. +func (m Mem) Offset(idx int) Mem { + a := m + a.Disp += idx + return a +} + +// Idx returns a new memory reference with (Index, Scale) set to (r, s). +func (m Mem) Idx(r reg.Register, s uint8) Mem { + a := m + a.Index = r + a.Scale = s + return a +} + +// Asm returns an assembly syntax representation of m. +func (m Mem) Asm() string { + a := m.Symbol.String() + if a != "" { + a += fmt.Sprintf("%+d", m.Disp) + } else if m.Disp != 0 { + a += fmt.Sprintf("%d", m.Disp) + } + if m.Base != nil { + a += fmt.Sprintf("(%s)", m.Base.Asm()) + } + if m.Index != nil && m.Scale != 0 { + a += fmt.Sprintf("(%s*%d)", m.Index.Asm(), m.Scale) + } + return a +} + +// Rel is an offset relative to the instruction pointer. +type Rel int32 + +// Asm returns an assembly syntax representation of r. 
+func (r Rel) Asm() string { + return fmt.Sprintf(".%+d", r) +} + +// LabelRef is a reference to a label. +type LabelRef string + +// Asm returns an assembly syntax representation of l. +func (l LabelRef) Asm() string { + return string(l) +} + +// Registers returns the list of all operands involved in the given operand. +func Registers(op Op) []reg.Register { + switch op := op.(type) { + case reg.Register: + return []reg.Register{op} + case Mem: + var r []reg.Register + if op.Base != nil { + r = append(r, op.Base) + } + if op.Index != nil { + r = append(r, op.Index) + } + return r + case Constant, Rel, LabelRef: + return nil + } + panic("unknown operand type") +} + +// ApplyAllocation returns an operand with allocated registers replaced. Registers missing from the allocation are left alone. +func ApplyAllocation(op Op, a reg.Allocation) Op { + switch op := op.(type) { + case reg.Register: + return a.LookupDefault(op) + case Mem: + op.Base = a.LookupDefault(op.Base) + op.Index = a.LookupDefault(op.Index) + return op + } + return op +} diff --git a/vendor/github.com/mmcloughlin/avo/operand/zconst.go b/vendor/github.com/mmcloughlin/avo/operand/zconst.go new file mode 100644 index 00000000..324b4a96 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/operand/zconst.go @@ -0,0 +1,75 @@ +// Code generated by make_const.go. DO NOT EDIT. + +package operand + +import "fmt" + +// I8 is a 8-bit signed integer constant. +type I8 int8 + +func (i I8) Asm() string { return fmt.Sprintf("$%+d", i) } +func (i I8) Bytes() int { return 1 } +func (i I8) constant() {} + +// U8 is a 8-bit unsigned integer constant. +type U8 uint8 + +func (u U8) Asm() string { return fmt.Sprintf("$%#02x", u) } +func (u U8) Bytes() int { return 1 } +func (u U8) constant() {} + +// I16 is a 16-bit signed integer constant. +type I16 int16 + +func (i I16) Asm() string { return fmt.Sprintf("$%+d", i) } +func (i I16) Bytes() int { return 2 } +func (i I16) constant() {} + +// U16 is a 16-bit unsigned integer constant. +type U16 uint16 + +func (u U16) Asm() string { return fmt.Sprintf("$%#04x", u) } +func (u U16) Bytes() int { return 2 } +func (u U16) constant() {} + +// F32 is a 32-bit floating point constant. +type F32 float32 + +func (f F32) Asm() string { return fmt.Sprintf("$(%#v)", f) } +func (f F32) Bytes() int { return 4 } +func (f F32) constant() {} + +// I32 is a 32-bit signed integer constant. +type I32 int32 + +func (i I32) Asm() string { return fmt.Sprintf("$%+d", i) } +func (i I32) Bytes() int { return 4 } +func (i I32) constant() {} + +// U32 is a 32-bit unsigned integer constant. +type U32 uint32 + +func (u U32) Asm() string { return fmt.Sprintf("$%#08x", u) } +func (u U32) Bytes() int { return 4 } +func (u U32) constant() {} + +// F64 is a 64-bit floating point constant. +type F64 float64 + +func (f F64) Asm() string { return fmt.Sprintf("$(%#v)", f) } +func (f F64) Bytes() int { return 8 } +func (f F64) constant() {} + +// I64 is a 64-bit signed integer constant. +type I64 int64 + +func (i I64) Asm() string { return fmt.Sprintf("$%+d", i) } +func (i I64) Bytes() int { return 8 } +func (i I64) constant() {} + +// U64 is a 64-bit unsigned integer constant. 
+type U64 uint64 + +func (u U64) Asm() string { return fmt.Sprintf("$%#016x", u) } +func (u U64) Bytes() int { return 8 } +func (u U64) constant() {} diff --git a/vendor/github.com/mmcloughlin/avo/pass/alloc.go b/vendor/github.com/mmcloughlin/avo/pass/alloc.go new file mode 100644 index 00000000..c67a211c --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/pass/alloc.go @@ -0,0 +1,188 @@ +package pass + +import ( + "errors" + "math" + + "github.com/mmcloughlin/avo/reg" +) + +// edge is an edge of the interference graph, indicating that registers X and Y +// must be in non-conflicting registers. +type edge struct { + X, Y reg.Register +} + +// Allocator is a graph-coloring register allocator. +type Allocator struct { + registers []reg.Physical + allocation reg.Allocation + edges []*edge + possible map[reg.Virtual][]reg.Physical + vidtopid map[reg.VID]reg.PID +} + +// NewAllocator builds an allocator for the given physical registers. +func NewAllocator(rs []reg.Physical) (*Allocator, error) { + if len(rs) == 0 { + return nil, errors.New("no registers") + } + return &Allocator{ + registers: rs, + allocation: reg.NewEmptyAllocation(), + possible: map[reg.Virtual][]reg.Physical{}, + vidtopid: map[reg.VID]reg.PID{}, + }, nil +} + +// NewAllocatorForKind builds an allocator for the given kind of registers. +func NewAllocatorForKind(k reg.Kind) (*Allocator, error) { + f := reg.FamilyOfKind(k) + if f == nil { + return nil, errors.New("unknown register family") + } + return NewAllocator(f.Registers()) +} + +// AddInterferenceSet records that r interferes with every register in s. Convenience wrapper around AddInterference. +func (a *Allocator) AddInterferenceSet(r reg.Register, s reg.Set) { + for y := range s { + a.AddInterference(r, y) + } +} + +// AddInterference records that x and y must be assigned to non-conflicting physical registers. +func (a *Allocator) AddInterference(x, y reg.Register) { + a.Add(x) + a.Add(y) + a.edges = append(a.edges, &edge{X: x, Y: y}) +} + +// Add adds a register to be allocated. Does nothing if the register has already been added. +func (a *Allocator) Add(r reg.Register) { + v, ok := r.(reg.Virtual) + if !ok { + return + } + if _, found := a.possible[v]; found { + return + } + a.possible[v] = a.possibleregisters(v) +} + +// Allocate allocates physical registers. +func (a *Allocator) Allocate() (reg.Allocation, error) { + for { + if err := a.update(); err != nil { + return nil, err + } + + if a.remaining() == 0 { + break + } + + v := a.mostrestricted() + if err := a.alloc(v); err != nil { + return nil, err + } + } + return a.allocation, nil +} + +// update possible allocations based on edges. 
+func (a *Allocator) update() error { + for v := range a.possible { + pid, found := a.vidtopid[v.VirtualID()] + if !found { + continue + } + a.possible[v] = filterregisters(a.possible[v], func(r reg.Physical) bool { + return r.PhysicalID() == pid + }) + } + + var rem []*edge + for _, e := range a.edges { + e.X, e.Y = a.allocation.LookupDefault(e.X), a.allocation.LookupDefault(e.Y) + + px, py := reg.ToPhysical(e.X), reg.ToPhysical(e.Y) + vx, vy := reg.ToVirtual(e.X), reg.ToVirtual(e.Y) + + switch { + case vx != nil && vy != nil: + rem = append(rem, e) + continue + case px != nil && py != nil: + if reg.AreConflicting(px, py) { + return errors.New("impossible register allocation") + } + case px != nil && vy != nil: + a.discardconflicting(vy, px) + case vx != nil && py != nil: + a.discardconflicting(vx, py) + default: + panic("unreachable") + } + } + a.edges = rem + + return nil +} + +// mostrestricted returns the virtual register with the least possibilities. +func (a *Allocator) mostrestricted() reg.Virtual { + n := int(math.MaxInt32) + var v reg.Virtual + for r, p := range a.possible { + if len(p) < n || (len(p) == n && v != nil && r.VirtualID() < v.VirtualID()) { + n = len(p) + v = r + } + } + return v +} + +// discardconflicting removes registers from vs possible list that conflict with p. +func (a *Allocator) discardconflicting(v reg.Virtual, p reg.Physical) { + a.possible[v] = filterregisters(a.possible[v], func(r reg.Physical) bool { + if pid, found := a.vidtopid[v.VirtualID()]; found && pid == p.PhysicalID() { + return true + } + return !reg.AreConflicting(r, p) + }) +} + +// alloc attempts to allocate a register to v. +func (a *Allocator) alloc(v reg.Virtual) error { + ps := a.possible[v] + if len(ps) == 0 { + return errors.New("failed to allocate registers") + } + p := ps[0] + a.allocation[v] = p + delete(a.possible, v) + a.vidtopid[v.VirtualID()] = p.PhysicalID() + return nil +} + +// remaining returns the number of unallocated registers. +func (a *Allocator) remaining() int { + return len(a.possible) +} + +// possibleregisters returns all allocate-able registers for the given virtual. +func (a *Allocator) possibleregisters(v reg.Virtual) []reg.Physical { + return filterregisters(a.registers, func(r reg.Physical) bool { + return v.SatisfiedBy(r) && (r.Info()&reg.Restricted) == 0 + }) +} + +func filterregisters(in []reg.Physical, predicate func(reg.Physical) bool) []reg.Physical { + var rs []reg.Physical + for _, r := range in { + if predicate(r) { + rs = append(rs, r) + } + } + return rs +} diff --git a/vendor/github.com/mmcloughlin/avo/pass/cfg.go b/vendor/github.com/mmcloughlin/avo/pass/cfg.go new file mode 100644 index 00000000..198208de --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/pass/cfg.go @@ -0,0 +1,85 @@ +package pass + +import ( + "errors" + "fmt" + + "github.com/mmcloughlin/avo/ir" +) + +// LabelTarget populates the LabelTarget of the given function. This maps from +// label name to the following instruction.
+func LabelTarget(fn *ir.Function) error { + target := map[ir.Label]*ir.Instruction{} + var empty ir.Label + pending := empty + for _, node := range fn.Nodes { + switch n := node.(type) { + case ir.Label: + if pending != empty { + return errors.New("instruction should follow a label") + } + pending = n + if _, found := target[pending]; found { + return fmt.Errorf("duplicate label \"%s\"", pending) + } + case *ir.Instruction: + if pending != empty { + target[pending] = n + pending = empty + } + } + } + if pending != empty { + return errors.New("function ends with label") + } + fn.LabelTarget = target + return nil +} + +// CFG constructs the call-flow-graph for the function. +func CFG(fn *ir.Function) error { + is := fn.Instructions() + n := len(is) + + // Populate successors. + for i := 0; i < n; i++ { + cur := is[i] + var nxt *ir.Instruction + if i+1 < n { + nxt = is[i+1] + } + + // If it's a branch, locate the target. + if cur.IsBranch { + lbl := cur.TargetLabel() + if lbl == nil { + return errors.New("no label for branch instruction") + } + target, found := fn.LabelTarget[*lbl] + if !found { + return fmt.Errorf("unknown label %q", *lbl) + } + cur.Succ = append(cur.Succ, target) + } + + // Otherwise, could continue to the following instruction. + switch { + case cur.IsTerminal: + case cur.IsBranch && !cur.IsConditional: + default: + cur.Succ = append(cur.Succ, nxt) + } + } + + // Populate predecessors. + for _, i := range is { + for _, s := range i.Succ { + if s != nil { + s.Pred = append(s.Pred, i) + } + } + } + + return nil +} diff --git a/vendor/github.com/mmcloughlin/avo/pass/cleanup.go b/vendor/github.com/mmcloughlin/avo/pass/cleanup.go new file mode 100644 index 00000000..d91250f3 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/pass/cleanup.go @@ -0,0 +1,123 @@ +package pass + +import ( + "github.com/mmcloughlin/avo/ir" + "github.com/mmcloughlin/avo/operand" +) + +// PruneJumpToFollowingLabel removes jump instructions that target an +// immediately following label. +func PruneJumpToFollowingLabel(fn *ir.Function) error { + for i := 0; i+1 < len(fn.Nodes); i++ { + node := fn.Nodes[i] + next := fn.Nodes[i+1] + + // This node is an unconditional jump. + inst, ok := node.(*ir.Instruction) + if !ok || !inst.IsBranch || inst.IsConditional { + continue + } + + target := inst.TargetLabel() + if target == nil { + continue + } + + // And the jump target is the immediately following node. + lbl, ok := next.(ir.Label) + if !ok || lbl != *target { + continue + } + + // Then the jump is unnecessary and can be removed. + fn.Nodes = deletenode(fn.Nodes, i) + i-- + } + + return nil +} + +// PruneDanglingLabels removes labels that are not referenced by any branches. +func PruneDanglingLabels(fn *ir.Function) error { + // Count label references. + count := map[ir.Label]int{} + for _, n := range fn.Nodes { + i, ok := n.(*ir.Instruction) + if !ok || !i.IsBranch { + continue + } + + target := i.TargetLabel() + if target == nil { + continue + } + + count[*target]++ + } + + // Look for labels with no references. + for i := 0; i < len(fn.Nodes); i++ { + node := fn.Nodes[i] + lbl, ok := node.(ir.Label) + if !ok { + continue + } + + if count[lbl] == 0 { + fn.Nodes = deletenode(fn.Nodes, i) + i-- + } + } + + return nil +} + +// PruneSelfMoves removes move instructions from one register to itself. 
+func PruneSelfMoves(fn *ir.Function) error { + return removeinstructions(fn, func(i *ir.Instruction) bool { + switch i.Opcode { + case "MOVB", "MOVW", "MOVL", "MOVQ": + default: + return false + } + + return operand.IsRegister(i.Operands[0]) && operand.IsRegister(i.Operands[1]) && i.Operands[0] == i.Operands[1] + }) +} + +// removeinstructions deletes instructions from the given function which match predicate. +func removeinstructions(fn *ir.Function, predicate func(*ir.Instruction) bool) error { + // Removal of instructions has the potential to invalidate CFG structures. + // Clear them to prevent accidental use of stale structures after this pass. + invalidatecfg(fn) + + for i := 0; i < len(fn.Nodes); i++ { + n := fn.Nodes[i] + + inst, ok := n.(*ir.Instruction) + if !ok || !predicate(inst) { + continue + } + + fn.Nodes = deletenode(fn.Nodes, i) + } + + return nil +} + +// deletenode deletes node i from nodes and returns the resulting slice. +func deletenode(nodes []ir.Node, i int) []ir.Node { + n := len(nodes) + copy(nodes[i:], nodes[i+1:]) + nodes[n-1] = nil + return nodes[:n-1] +} + +// invalidatecfg clears CFG structures. +func invalidatecfg(fn *ir.Function) { + fn.LabelTarget = nil + for _, i := range fn.Instructions() { + i.Pred = nil + i.Succ = nil + } +} diff --git a/vendor/github.com/mmcloughlin/avo/pass/pass.go b/vendor/github.com/mmcloughlin/avo/pass/pass.go new file mode 100644 index 00000000..6b99e717 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/pass/pass.go @@ -0,0 +1,81 @@ +// Package pass implements processing passes on avo Files. +package pass + +import ( + "io" + + "github.com/mmcloughlin/avo/ir" + "github.com/mmcloughlin/avo/printer" +) + +// Compile pass compiles an avo file. Upon successful completion the avo file +// may be printed to Go assembly. +var Compile = Concat( + FunctionPass(PruneJumpToFollowingLabel), + FunctionPass(PruneDanglingLabels), + FunctionPass(LabelTarget), + FunctionPass(CFG), + FunctionPass(Liveness), + FunctionPass(AllocateRegisters), + FunctionPass(BindRegisters), + FunctionPass(VerifyAllocation), + Func(IncludeTextFlagHeader), + FunctionPass(PruneSelfMoves), +) + +// Interface for a processing pass. +type Interface interface { + Execute(*ir.File) error +} + +// Func adapts a function to the pass Interface. +type Func func(*ir.File) error + +// Execute calls p. +func (p Func) Execute(f *ir.File) error { + return p(f) +} + +// FunctionPass is a convenience for implementing a full file pass with a +// function that operates on each avo Function independently. +type FunctionPass func(*ir.Function) error + +// Execute calls p on every function in the file. Exits on the first error. +func (p FunctionPass) Execute(f *ir.File) error { + for _, fn := range f.Functions() { + if err := p(fn); err != nil { + return err + } + } + return nil +} + +// Concat returns a pass that executes the given passes in order, stopping on the first error. +func Concat(passes ...Interface) Interface { + return Func(func(f *ir.File) error { + for _, p := range passes { + if err := p.Execute(f); err != nil { + return err + } + } + return nil + }) +} + +// Output pass prints a file. +type Output struct { + Writer io.WriteCloser + Printer printer.Printer +} + +// Execute prints f with the configured Printer and writes output to Writer. 
+func (o *Output) Execute(f *ir.File) error { + b, err := o.Printer.Print(f) + if err != nil { + return err + } + if _, err = o.Writer.Write(b); err != nil { + return err + } + return o.Writer.Close() +} diff --git a/vendor/github.com/mmcloughlin/avo/pass/reg.go b/vendor/github.com/mmcloughlin/avo/pass/reg.go new file mode 100644 index 00000000..6d6e2153 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/pass/reg.go @@ -0,0 +1,134 @@ +package pass + +import ( + "errors" + + "github.com/mmcloughlin/avo/ir" + "github.com/mmcloughlin/avo/operand" + "github.com/mmcloughlin/avo/reg" +) + +// Liveness computes register liveness. +func Liveness(fn *ir.Function) error { + // Note this implementation is initially naive so as to be "obviously correct". + // There are a well-known optimizations we can apply if necessary. + + is := fn.Instructions() + + // Process instructions in reverse: poor approximation to topological sort. + // TODO(mbm): process instructions in topological sort order + for l, r := 0, len(is)-1; l < r; l, r = l+1, r-1 { + is[l], is[r] = is[r], is[l] + } + + // Initialize. + for _, i := range is { + i.LiveIn = reg.NewSetFromSlice(i.InputRegisters()) + i.LiveOut = reg.NewEmptySet() + } + + // Iterative dataflow analysis. + for { + changes := false + + for _, i := range is { + // out[n] = UNION[s IN succ[n]] in[s] + nout := len(i.LiveOut) + for _, s := range i.Succ { + if s == nil { + continue + } + i.LiveOut.Update(s.LiveIn) + } + if len(i.LiveOut) != nout { + changes = true + } + + // in[n] = use[n] UNION (out[n] - def[n]) + nin := len(i.LiveIn) + def := reg.NewSetFromSlice(i.OutputRegisters()) + i.LiveIn.Update(i.LiveOut.Difference(def)) + for r := range i.LiveOut { + if _, found := def[r]; !found { + i.LiveIn.Add(r) + } + } + if len(i.LiveIn) != nin { + changes = true + } + } + + if !changes { + break + } + } + + return nil +} + +// AllocateRegisters performs register allocation. +func AllocateRegisters(fn *ir.Function) error { + // Populate allocators (one per kind). + as := map[reg.Kind]*Allocator{} + for _, i := range fn.Instructions() { + for _, r := range i.Registers() { + k := r.Kind() + if _, found := as[k]; !found { + a, err := NewAllocatorForKind(k) + if err != nil { + return err + } + as[k] = a + } + as[k].Add(r) + } + } + + // Record register interferences. + for _, i := range fn.Instructions() { + for _, d := range i.OutputRegisters() { + k := d.Kind() + out := i.LiveOut.OfKind(k) + out.Discard(d) + as[k].AddInterferenceSet(d, out) + } + } + + // Execute register allocation. + fn.Allocation = reg.NewEmptyAllocation() + for _, a := range as { + al, err := a.Allocate() + if err != nil { + return err + } + if err := fn.Allocation.Merge(al); err != nil { + return err + } + } + + return nil +} + +// BindRegisters applies the result of register allocation, replacing all virtual registers with their assigned physical registers. +func BindRegisters(fn *ir.Function) error { + for _, i := range fn.Instructions() { + for idx := range i.Operands { + i.Operands[idx] = operand.ApplyAllocation(i.Operands[idx], fn.Allocation) + } + } + return nil +} + +// VerifyAllocation performs sanity checks following register allocation. +func VerifyAllocation(fn *ir.Function) error { + // All registers should be physical. 
+ for _, i := range fn.Instructions() { + for _, r := range i.Registers() { + if reg.ToPhysical(r) == nil { + return errors.New("non physical register found") + } + } + } + + return nil +} diff --git a/vendor/github.com/mmcloughlin/avo/pass/textflag.go b/vendor/github.com/mmcloughlin/avo/pass/textflag.go new file mode 100644 index 00000000..35a848b8 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/pass/textflag.go @@ -0,0 +1,42 @@ +package pass + +import ( + "github.com/mmcloughlin/avo/attr" + "github.com/mmcloughlin/avo/ir" +) + +// IncludeTextFlagHeader includes textflag.h if necessary. +func IncludeTextFlagHeader(f *ir.File) error { + const textflagheader = "textflag.h" + + // Check if we already have it. + for _, path := range f.Includes { + if path == textflagheader { + return nil + } + } + + // Add it if necessary. + if requirestextflags(f) { + f.Includes = append(f.Includes, textflagheader) + } + + return nil +} + +// requirestextflags returns whether the file uses flags in the textflags.h header. +func requirestextflags(f *ir.File) bool { + for _, s := range f.Sections { + var a attr.Attribute + switch s := s.(type) { + case *ir.Function: + a = s.Attributes + case *ir.Global: + a = s.Attributes + } + if a.ContainsTextFlags() { + return true + } + } + return false +} diff --git a/vendor/github.com/mmcloughlin/avo/printer/goasm.go b/vendor/github.com/mmcloughlin/avo/printer/goasm.go new file mode 100644 index 00000000..66522a55 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/printer/goasm.go @@ -0,0 +1,157 @@ +package printer + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + + "github.com/mmcloughlin/avo/internal/prnt" + "github.com/mmcloughlin/avo/ir" + "github.com/mmcloughlin/avo/operand" +) + +// dot is the pesky unicode dot used in Go assembly. +const dot = "\u00b7" + +type goasm struct { + cfg Config + prnt.Generator +} + +// NewGoAsm constructs a printer for writing Go assembly files. +func NewGoAsm(cfg Config) Printer { + return &goasm{cfg: cfg} +} + +func (p *goasm) Print(f *ir.File) ([]byte, error) { + p.header(f) + for _, s := range f.Sections { + switch s := s.(type) { + case *ir.Function: + p.function(s) + case *ir.Global: + p.global(s) + default: + panic("unknown section type") + } + } + return p.Result() +} + +func (p *goasm) header(f *ir.File) { + p.Comment(p.cfg.GeneratedWarning()) + + if len(f.Constraints) > 0 { + p.NL() + p.Printf(f.Constraints.GoString()) + } + + if len(f.Includes) > 0 { + p.NL() + p.includes(f.Includes) + } +} + +func (p *goasm) includes(paths []string) { + for _, path := range paths { + p.Printf("#include \"%s\"\n", path) + } +} + +func (p *goasm) function(f *ir.Function) { + p.NL() + p.Comment(f.Stub()) + + // Reference: https://github.com/golang/go/blob/b115207baf6c2decc3820ada4574ef4e5ad940ec/src/cmd/internal/obj/util.go#L166-L176 + // + // if p.As == ATEXT { + // // If there are attributes, print them. Otherwise, skip the comma. 
+ // // In short, print one of these two: + // // TEXT foo(SB), DUPOK|NOSPLIT, $0 + // // TEXT foo(SB), $0 + // s := p.From.Sym.Attribute.TextAttrString() + // if s != "" { + // fmt.Fprintf(&buf, "%s%s", sep, s) + // sep = ", " + // } + // } + // + p.Printf("TEXT %s%s(SB)", dot, f.Name) + if f.Attributes != 0 { + p.Printf(", %s", f.Attributes.Asm()) + } + p.Printf(", %s\n", textsize(f)) + + w := p.tabwriter() + clear := true + flush := func() { + w.Flush() + w = p.tabwriter() + if !clear { + p.NL() + clear = true + } + } + for _, node := range f.Nodes { + switch n := node.(type) { + case *ir.Instruction: + leader := []byte{tabwriter.Escape, '\t', tabwriter.Escape} + fmt.Fprint(w, string(leader)+n.Opcode) + if len(n.Operands) > 0 { + fmt.Fprintf(w, "\t%s", joinOperands(n.Operands)) + } + fmt.Fprint(w, "\n") + clear = false + case ir.Label: + flush() + p.Printf("%s:\n", n) + case *ir.Comment: + flush() + for _, line := range n.Lines { + p.Printf("\t// %s\n", line) + } + default: + panic("unexpected node type") + } + } + w.Flush() +} + +func (p *goasm) tabwriter() *tabwriter.Writer { + return tabwriter.NewWriter(p.Raw(), 4, 4, 1, ' ', tabwriter.StripEscape) +} + +func (p *goasm) global(g *ir.Global) { + p.NL() + for _, d := range g.Data { + a := operand.NewDataAddr(g.Symbol, d.Offset) + p.Printf("DATA %s/%d, %s\n", a.Asm(), d.Value.Bytes(), d.Value.Asm()) + } + p.Printf("GLOBL %s(SB), %s, $%d\n", g.Symbol, g.Attributes.Asm(), g.Size) +} + +func textsize(f *ir.Function) string { + // Reference: https://github.com/golang/go/blob/b115207baf6c2decc3820ada4574ef4e5ad940ec/src/cmd/internal/obj/util.go#L260-L265 + // + // case TYPE_TEXTSIZE: + // if a.Val.(int32) == objabi.ArgsSizeUnknown { + // str = fmt.Sprintf("$%d", a.Offset) + // } else { + // str = fmt.Sprintf("$%d-%d", a.Offset, a.Val.(int32)) + // } + // + s := "$" + strconv.Itoa(f.FrameBytes()) + if argsize := f.ArgumentBytes(); argsize > 0 { + return s + "-" + strconv.Itoa(argsize) + } + return s +} + +func joinOperands(operands []operand.Op) string { + asm := make([]string, len(operands)) + for i, op := range operands { + asm[i] = op.Asm() + } + return strings.Join(asm, ", ") +} diff --git a/vendor/github.com/mmcloughlin/avo/printer/printer.go b/vendor/github.com/mmcloughlin/avo/printer/printer.go new file mode 100644 index 00000000..b562c74e --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/printer/printer.go @@ -0,0 +1,98 @@ +// Package printer implements printing of avo files in various formats. +package printer + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/mmcloughlin/avo/internal/stack" + "github.com/mmcloughlin/avo/ir" +) + +// Printer can produce output for an avo File. +type Printer interface { + Print(*ir.File) ([]byte, error) +} + +// Builder can construct a printer. +type Builder func(Config) Printer + +// Config represents general printing configuration. +type Config struct { + // Command-line arguments passed to the generator. If provided, this will be + // included in a code generation warning. + Argv []string + + // Name of the code generator. + Name string + + // Name of Go package the generated code will belong to. + Pkg string +} + +// NewDefaultConfig produces a config with Name "avo". +// The package name is guessed from the current directory. +func NewDefaultConfig() Config { + return Config{ + Name: "avo", + Pkg: pkg(), + } +} + +// NewArgvConfig constructs a Config from os.Args. +// The package name is guessed from the current directory. 
+func NewArgvConfig() Config { + return Config{ + Argv: os.Args, + Pkg: pkg(), + } +} + +// NewGoRunConfig produces a Config for a generator that's expected to be +// executed via "go run ...". +func NewGoRunConfig() Config { + path := mainfile() + if path == "" { + return NewDefaultConfig() + } + argv := []string{"go", "run", filepath.Base(path)} + if len(os.Args) > 1 { + argv = append(argv, os.Args[1:]...) + } + return Config{ + Argv: argv, + Pkg: pkg(), + } +} + +// GeneratedBy returns a description of the code generator. +func (c Config) GeneratedBy() string { + if c.Argv == nil { + return c.Name + } + return fmt.Sprintf("command: %s", strings.Join(c.Argv, " ")) +} + +// GeneratedWarning returns text for a code generation warning. Conforms to https://golang.org/s/generatedcode. +func (c Config) GeneratedWarning() string { + return fmt.Sprintf("Code generated by %s. DO NOT EDIT.", c.GeneratedBy()) +} + +// mainfile attempts to determine the file path of the main function by +// inspecting the stack. Returns empty string on failure. +func mainfile() string { + if m := stack.Main(); m != nil { + return m.File + } + return "" +} + +// pkg guesses the name of the package from the working directory. +func pkg() string { + if cwd, err := os.Getwd(); err == nil { + return filepath.Base(cwd) + } + return "" +} diff --git a/vendor/github.com/mmcloughlin/avo/printer/stubs.go b/vendor/github.com/mmcloughlin/avo/printer/stubs.go new file mode 100644 index 00000000..1aedcf07 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/printer/stubs.go @@ -0,0 +1,34 @@ +package printer + +import ( + "github.com/mmcloughlin/avo/internal/prnt" + "github.com/mmcloughlin/avo/ir" +) + +type stubs struct { + cfg Config + prnt.Generator +} + +// NewStubs constructs a printer for writing stub function declarations. +func NewStubs(cfg Config) Printer { + return &stubs{cfg: cfg} +} + +func (s *stubs) Print(f *ir.File) ([]byte, error) { + s.Comment(s.cfg.GeneratedWarning()) + + if len(f.Constraints) > 0 { + s.NL() + s.Printf(f.Constraints.GoString()) + } + + s.NL() + s.Printf("package %s\n", s.cfg.Pkg) + for _, fn := range f.Functions() { + s.NL() + s.Comment(fn.Doc...) + s.Printf("%s\n", fn.Stub()) + } + return s.Result() +} diff --git a/vendor/github.com/mmcloughlin/avo/reg/collection.go b/vendor/github.com/mmcloughlin/avo/reg/collection.go new file mode 100644 index 00000000..5c85104e --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/reg/collection.go @@ -0,0 +1,48 @@ +package reg + +// Collection represents a collection of virtual registers. This is primarily +// useful for allocating virtual registers with distinct IDs. +type Collection struct { + vid map[Kind]VID +} + +// NewCollection builds an empty register collection. +func NewCollection() *Collection { + return &Collection{ + vid: map[Kind]VID{}, + } +} + +// VirtualRegister allocates and returns a new virtual register of the given kind and width. +func (c *Collection) VirtualRegister(k Kind, w Width) Virtual { + vid := c.vid[k] + c.vid[k]++ + return NewVirtual(vid, k, w) +} + +// GP8 allocates and returns a general-purpose 8-bit register. +func (c *Collection) GP8() GPVirtual { return c.GP(B8) } + +// GP16 allocates and returns a general-purpose 16-bit register. +func (c *Collection) GP16() GPVirtual { return c.GP(B16) } + +// GP32 allocates and returns a general-purpose 32-bit register. +func (c *Collection) GP32() GPVirtual { return c.GP(B32) } + +// GP64 allocates and returns a general-purpose 64-bit register. 
+func (c *Collection) GP64() GPVirtual { return c.GP(B64) } + +// GP allocates and returns a general-purpose register of the given width. +func (c *Collection) GP(w Width) GPVirtual { return newgpv(c.VirtualRegister(KindGP, w)) } + +// XMM allocates and returns a 128-bit vector register. +func (c *Collection) XMM() VecVirtual { return c.Vec(B128) } + +// YMM allocates and returns a 256-bit vector register. +func (c *Collection) YMM() VecVirtual { return c.Vec(B256) } + +// ZMM allocates and returns a 512-bit vector register. +func (c *Collection) ZMM() VecVirtual { return c.Vec(B512) } + +// Vec allocates and returns a vector register of the given width. +func (c *Collection) Vec(w Width) VecVirtual { return newvecv(c.VirtualRegister(KindVector, w)) } diff --git a/vendor/github.com/mmcloughlin/avo/reg/doc.go b/vendor/github.com/mmcloughlin/avo/reg/doc.go new file mode 100644 index 00000000..1c0aee37 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/reg/doc.go @@ -0,0 +1,2 @@ +// Package reg provides types for physical and virtual registers, and definitions of x86-64 register families. +package reg diff --git a/vendor/github.com/mmcloughlin/avo/reg/set.go b/vendor/github.com/mmcloughlin/avo/reg/set.go new file mode 100644 index 00000000..fd1ddf3a --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/reg/set.go @@ -0,0 +1,82 @@ +package reg + +// Set is a set of registers. +type Set map[Register]bool + +// NewEmptySet builds an empty register set. +func NewEmptySet() Set { + return Set{} +} + +// NewSetFromSlice forms a set from the given register list. +func NewSetFromSlice(rs []Register) Set { + s := NewEmptySet() + for _, r := range rs { + s.Add(r) + } + return s +} + +// Clone returns a copy of s. +func (s Set) Clone() Set { + c := NewEmptySet() + for r := range s { + c.Add(r) + } + return c +} + +// Add r to s. +func (s Set) Add(r Register) { + s[r] = true +} + +// Discard removes r from s, if present. +func (s Set) Discard(r Register) { + delete(s, r) +} + +// Update adds every register in t to s. +func (s Set) Update(t Set) { + for r := range t { + s.Add(r) + } +} + +// Difference returns the set of registers in s but not t. +func (s Set) Difference(t Set) Set { + d := s.Clone() + d.DifferenceUpdate(t) + return d +} + +// DifferenceUpdate removes every element of t from s. +func (s Set) DifferenceUpdate(t Set) { + for r := range t { + s.Discard(r) + } +} + +// Equals returns true if s and t contain the same registers. +func (s Set) Equals(t Set) bool { + if len(s) != len(t) { + return false + } + for r := range s { + if _, found := t[r]; !found { + return false + } + } + return true +} + +// OfKind returns the set of elements of s with kind k. +func (s Set) OfKind(k Kind) Set { + t := NewEmptySet() + for r := range s { + if r.Kind() == k { + t.Add(r) + } + } + return t +} diff --git a/vendor/github.com/mmcloughlin/avo/reg/types.go b/vendor/github.com/mmcloughlin/avo/reg/types.go new file mode 100644 index 00000000..cd9f32ac --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/reg/types.go @@ -0,0 +1,269 @@ +package reg + +import ( + "errors" + "fmt" +) + +// Width is a register width. +type Width uint + +// Typical register width values. +const ( + B8 Width = 1 << iota + B16 + B32 + B64 + B128 + B256 + B512 +) + +// Size returns the register width in bytes. +func (w Width) Size() uint { return uint(w) } + +// Kind is a class of registers. +type Kind uint8 + +// Family is a collection of Physical registers of a common kind. 
+type Family struct { + Kind Kind + registers []Physical +} + +// define builds a register and adds it to the Family. +func (f *Family) define(s Spec, id PID, name string, flags ...Info) Physical { + r := newregister(f, s, id, name, flags...) + f.add(r) + return r +} + +// add r to the family. +func (f *Family) add(r Physical) { + if r.Kind() != f.Kind { + panic("bad kind") + } + f.registers = append(f.registers, r) +} + +// Virtual returns a virtual register from this family's kind. +func (f *Family) Virtual(id VID, w Width) Virtual { + return NewVirtual(id, f.Kind, w) +} + +// Registers returns the registers in this family. +func (f *Family) Registers() []Physical { + return append([]Physical(nil), f.registers...) +} + +// Set returns the set of registers in the family. +func (f *Family) Set() Set { + s := NewEmptySet() + for _, r := range f.registers { + s.Add(r) + } + return s +} + +// Lookup returns the register with given physical ID and spec. Returns nil if no such register exists. +func (f *Family) Lookup(id PID, s Spec) Physical { + for _, r := range f.registers { + if r.PhysicalID() == id && r.Mask() == s.Mask() { + return r + } + } + return nil +} + +// Register represents a virtual or physical register. +type Register interface { + Kind() Kind + Size() uint + Asm() string + as(Spec) Register + register() +} + +// VID is a virtual register ID. +type VID uint16 + +// Virtual is a register of a given type and size, not yet allocated to a physical register. +type Virtual interface { + VirtualID() VID + SatisfiedBy(Physical) bool + Register +} + +// ToVirtual converts r to Virtual if possible, otherwise returns nil. +func ToVirtual(r Register) Virtual { + if v, ok := r.(Virtual); ok { + return v + } + return nil +} + +type virtual struct { + id VID + kind Kind + Width + mask uint16 +} + +// NewVirtual builds a Virtual register. +func NewVirtual(id VID, k Kind, w Width) Virtual { + return virtual{ + id: id, + kind: k, + Width: w, + } +} + +func (v virtual) VirtualID() VID { return v.id } +func (v virtual) Kind() Kind { return v.kind } + +func (v virtual) Asm() string { + // TODO(mbm): decide on virtual register syntax + return fmt.Sprintf("<virtual:%v:%v:%v>", v.id, v.Kind(), v.Size()) +} + +func (v virtual) SatisfiedBy(p Physical) bool { + return v.Kind() == p.Kind() && v.Size() == p.Size() && (v.mask == 0 || v.mask == p.Mask()) +} + +func (v virtual) as(s Spec) Register { + return virtual{ + id: v.id, + kind: v.kind, + Width: Width(s.Size()), + mask: s.Mask(), + } +} + +func (v virtual) register() {} + +// Info is a bitmask of register properties. +type Info uint8 + +// Defined register Info flags. +const ( + None Info = 0 + Restricted Info = 1 << iota +) + +// PID is a physical register ID. +type PID uint16 + +// Physical is a concrete register. +type Physical interface { + PhysicalID() PID + Mask() uint16 + Info() Info + Register +} + +// ToPhysical converts r to Physical if possible, otherwise returns nil. +func ToPhysical(r Register) Physical { + if p, ok := r.(Physical); ok { + return p + } + return nil +} + +// register implements Physical.
+type register struct { + family *Family + id PID + name string + info Info + Spec +} + +func newregister(f *Family, s Spec, id PID, name string, flags ...Info) register { + r := register{ + family: f, + id: id, + name: name, + info: None, + Spec: s, + } + for _, flag := range flags { + r.info |= flag + } + return r +} + +func (r register) PhysicalID() PID { return r.id } +func (r register) Kind() Kind { return r.family.Kind } +func (r register) Asm() string { return r.name } +func (r register) Info() Info { return r.info } + +func (r register) as(s Spec) Register { + return r.family.Lookup(r.PhysicalID(), s) +} + +func (r register) register() {} + +// Spec defines the size of a register as well as the bit ranges it occupies in +// an underlying physical register. +type Spec uint16 + +// Spec values required for x86-64. +const ( + S0 Spec = 0x0 // zero value reserved for pseudo registers + S8L Spec = 0x1 + S8H Spec = 0x2 + S8 = S8L + S16 Spec = 0x3 + S32 Spec = 0x7 + S64 Spec = 0xf + S128 Spec = 0x1f + S256 Spec = 0x3f + S512 Spec = 0x7f +) + +// Mask returns a mask representing which bytes of an underlying register are +// used by this register. This is almost always the low bytes, except for the +// case of the high-byte registers. If bit n of the mask is set, this means +// bytes 2^(n-1) to 2^n-1 are used. +func (s Spec) Mask() uint16 { + return uint16(s) +} + +// Size returns the register width in bytes. +func (s Spec) Size() uint { + x := uint(s) + return (x >> 1) + (x & 1) +} + +// AreConflicting returns whether registers conflict with each other. +func AreConflicting(x, y Physical) bool { + return x.Kind() == y.Kind() && x.PhysicalID() == y.PhysicalID() && (x.Mask()&y.Mask()) != 0 +} + +// Allocation records a register allocation. +type Allocation map[Register]Physical + +// NewEmptyAllocation builds an empty register allocation. +func NewEmptyAllocation() Allocation { + return Allocation{} +} + +// Merge allocations from b into a. Errors if there is disagreement on a common +// register. +func (a Allocation) Merge(b Allocation) error { + for r, p := range b { + if alt, found := a[r]; found && alt != p { + return errors.New("disagreement on overlapping register") + } + a[r] = p + } + return nil +} + +// LookupDefault returns the register assigned to r, or r itself if there is none. +func (a Allocation) LookupDefault(r Register) Register { + if p, found := a[r]; found { + return p + } + return r +} diff --git a/vendor/github.com/mmcloughlin/avo/reg/x86.go b/vendor/github.com/mmcloughlin/avo/reg/x86.go new file mode 100644 index 00000000..96316bd5 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/reg/x86.go @@ -0,0 +1,330 @@ +package reg + +// Register kinds. +const ( + KindPseudo Kind = iota + KindGP + KindVector +) + +// Declare register families. +var ( + Pseudo = &Family{Kind: KindPseudo} + GeneralPurpose = &Family{Kind: KindGP} + Vector = &Family{Kind: KindVector} + + Families = []*Family{ + Pseudo, + GeneralPurpose, + Vector, + } +) + +var familiesByKind = map[Kind]*Family{} + +func init() { + for _, f := range Families { + familiesByKind[f.Kind] = f + } +} + +// FamilyOfKind returns the Family of registers of the given kind. +func FamilyOfKind(k Kind) *Family { + return familiesByKind[k] +} + +// Pseudo registers. +var ( + FramePointer = Pseudo.define(S0, 0, "FP") + ProgramCounter = Pseudo.define(S0, 0, "PC") + StaticBase = Pseudo.define(S0, 0, "SB") + StackPointer = Pseudo.define(S0, 0, "SP") +) + +// GP provides additional methods for general purpose registers. 
+type GP interface { + As8() Register + As8L() Register + As8H() Register + As16() Register + As32() Register + As64() Register +} + +type gpcasts struct { + Register +} + +func (c gpcasts) As8() Register { return c.as(S8) } +func (c gpcasts) As8L() Register { return c.as(S8L) } +func (c gpcasts) As8H() Register { return c.as(S8H) } +func (c gpcasts) As16() Register { return c.as(S16) } +func (c gpcasts) As32() Register { return c.as(S32) } +func (c gpcasts) As64() Register { return c.as(S64) } + +// GPPhysical is a general-purpose physical register. +type GPPhysical interface { + Physical + GP +} + +type gpp struct { + Physical + GP +} + +func newgpp(r Physical) GPPhysical { return gpp{Physical: r, GP: gpcasts{r}} } + +// GPVirtual is a general-purpose virtual register. +type GPVirtual interface { + Virtual + GP +} + +type gpv struct { + Virtual + GP +} + +func newgpv(v Virtual) GPVirtual { return gpv{Virtual: v, GP: gpcasts{v}} } + +func gp(s Spec, id PID, name string, flags ...Info) GPPhysical { + r := newgpp(newregister(GeneralPurpose, s, id, name, flags...)) + GeneralPurpose.add(r) + return r +} + +// General purpose registers. +var ( + // Low byte + AL = gp(S8L, 0, "AL") + CL = gp(S8L, 1, "CL") + DL = gp(S8L, 2, "DL") + BL = gp(S8L, 3, "BL") + + // High byte + AH = gp(S8H, 0, "AH") + CH = gp(S8H, 1, "CH") + DH = gp(S8H, 2, "DH") + BH = gp(S8H, 3, "BH") + + // 8-bit + SPB = gp(S8, 4, "SP", Restricted) + BPB = gp(S8, 5, "BP") + SIB = gp(S8, 6, "SI") + DIB = gp(S8, 7, "DI") + R8B = gp(S8, 8, "R8") + R9B = gp(S8, 9, "R9") + R10B = gp(S8, 10, "R10") + R11B = gp(S8, 11, "R11") + R12B = gp(S8, 12, "R12") + R13B = gp(S8, 13, "R13") + R14B = gp(S8, 14, "R14") + R15B = gp(S8, 15, "R15") + + // 16-bit + AX = gp(S16, 0, "AX") + CX = gp(S16, 1, "CX") + DX = gp(S16, 2, "DX") + BX = gp(S16, 3, "BX") + SP = gp(S16, 4, "SP", Restricted) + BP = gp(S16, 5, "BP") + SI = gp(S16, 6, "SI") + DI = gp(S16, 7, "DI") + R8W = gp(S16, 8, "R8") + R9W = gp(S16, 9, "R9") + R10W = gp(S16, 10, "R10") + R11W = gp(S16, 11, "R11") + R12W = gp(S16, 12, "R12") + R13W = gp(S16, 13, "R13") + R14W = gp(S16, 14, "R14") + R15W = gp(S16, 15, "R15") + + // 32-bit + EAX = gp(S32, 0, "AX") + ECX = gp(S32, 1, "CX") + EDX = gp(S32, 2, "DX") + EBX = gp(S32, 3, "BX") + ESP = gp(S32, 4, "SP", Restricted) + EBP = gp(S32, 5, "BP") + ESI = gp(S32, 6, "SI") + EDI = gp(S32, 7, "DI") + R8L = gp(S32, 8, "R8") + R9L = gp(S32, 9, "R9") + R10L = gp(S32, 10, "R10") + R11L = gp(S32, 11, "R11") + R12L = gp(S32, 12, "R12") + R13L = gp(S32, 13, "R13") + R14L = gp(S32, 14, "R14") + R15L = gp(S32, 15, "R15") + + // 64-bit + RAX = gp(S64, 0, "AX") + RCX = gp(S64, 1, "CX") + RDX = gp(S64, 2, "DX") + RBX = gp(S64, 3, "BX") + RSP = gp(S64, 4, "SP", Restricted) + RBP = gp(S64, 5, "BP") + RSI = gp(S64, 6, "SI") + RDI = gp(S64, 7, "DI") + R8 = gp(S64, 8, "R8") + R9 = gp(S64, 9, "R9") + R10 = gp(S64, 10, "R10") + R11 = gp(S64, 11, "R11") + R12 = gp(S64, 12, "R12") + R13 = gp(S64, 13, "R13") + R14 = gp(S64, 14, "R14") + R15 = gp(S64, 15, "R15") +) + +// Vec provides methods for vector registers. +type Vec interface { + AsX() Register + AsY() Register + AsZ() Register +} + +type veccasts struct { + Register +} + +func (c veccasts) AsX() Register { return c.as(S128) } +func (c veccasts) AsY() Register { return c.as(S256) } +func (c veccasts) AsZ() Register { return c.as(S512) } + +// VecPhysical is a physical vector register. 
+type VecPhysical interface { + Physical + Vec +} + +type vecp struct { + Physical + Vec +} + +func newvecp(r Physical) VecPhysical { return vecp{Physical: r, Vec: veccasts{r}} } + +// VecVirtual is a virtual vector register. +type VecVirtual interface { + Virtual + Vec +} + +type vecv struct { + Virtual + Vec +} + +func newvecv(v Virtual) VecVirtual { return vecv{Virtual: v, Vec: veccasts{v}} } + +func vec(s Spec, id PID, name string, flags ...Info) VecPhysical { + r := newvecp(newregister(Vector, s, id, name, flags...)) + Vector.add(r) + return r +} + +// Vector registers. +var ( + // 128-bit + X0 = vec(S128, 0, "X0") + X1 = vec(S128, 1, "X1") + X2 = vec(S128, 2, "X2") + X3 = vec(S128, 3, "X3") + X4 = vec(S128, 4, "X4") + X5 = vec(S128, 5, "X5") + X6 = vec(S128, 6, "X6") + X7 = vec(S128, 7, "X7") + X8 = vec(S128, 8, "X8") + X9 = vec(S128, 9, "X9") + X10 = vec(S128, 10, "X10") + X11 = vec(S128, 11, "X11") + X12 = vec(S128, 12, "X12") + X13 = vec(S128, 13, "X13") + X14 = vec(S128, 14, "X14") + X15 = vec(S128, 15, "X15") + X16 = vec(S128, 16, "X16") + X17 = vec(S128, 17, "X17") + X18 = vec(S128, 18, "X18") + X19 = vec(S128, 19, "X19") + X20 = vec(S128, 20, "X20") + X21 = vec(S128, 21, "X21") + X22 = vec(S128, 22, "X22") + X23 = vec(S128, 23, "X23") + X24 = vec(S128, 24, "X24") + X25 = vec(S128, 25, "X25") + X26 = vec(S128, 26, "X26") + X27 = vec(S128, 27, "X27") + X28 = vec(S128, 28, "X28") + X29 = vec(S128, 29, "X29") + X30 = vec(S128, 30, "X30") + X31 = vec(S128, 31, "X31") + + // 256-bit + Y0 = vec(S256, 0, "Y0") + Y1 = vec(S256, 1, "Y1") + Y2 = vec(S256, 2, "Y2") + Y3 = vec(S256, 3, "Y3") + Y4 = vec(S256, 4, "Y4") + Y5 = vec(S256, 5, "Y5") + Y6 = vec(S256, 6, "Y6") + Y7 = vec(S256, 7, "Y7") + Y8 = vec(S256, 8, "Y8") + Y9 = vec(S256, 9, "Y9") + Y10 = vec(S256, 10, "Y10") + Y11 = vec(S256, 11, "Y11") + Y12 = vec(S256, 12, "Y12") + Y13 = vec(S256, 13, "Y13") + Y14 = vec(S256, 14, "Y14") + Y15 = vec(S256, 15, "Y15") + Y16 = vec(S256, 16, "Y16") + Y17 = vec(S256, 17, "Y17") + Y18 = vec(S256, 18, "Y18") + Y19 = vec(S256, 19, "Y19") + Y20 = vec(S256, 20, "Y20") + Y21 = vec(S256, 21, "Y21") + Y22 = vec(S256, 22, "Y22") + Y23 = vec(S256, 23, "Y23") + Y24 = vec(S256, 24, "Y24") + Y25 = vec(S256, 25, "Y25") + Y26 = vec(S256, 26, "Y26") + Y27 = vec(S256, 27, "Y27") + Y28 = vec(S256, 28, "Y28") + Y29 = vec(S256, 29, "Y29") + Y30 = vec(S256, 30, "Y30") + Y31 = vec(S256, 31, "Y31") + + // 512-bit + Z0 = vec(S512, 0, "Z0") + Z1 = vec(S512, 1, "Z1") + Z2 = vec(S512, 2, "Z2") + Z3 = vec(S512, 3, "Z3") + Z4 = vec(S512, 4, "Z4") + Z5 = vec(S512, 5, "Z5") + Z6 = vec(S512, 6, "Z6") + Z7 = vec(S512, 7, "Z7") + Z8 = vec(S512, 8, "Z8") + Z9 = vec(S512, 9, "Z9") + Z10 = vec(S512, 10, "Z10") + Z11 = vec(S512, 11, "Z11") + Z12 = vec(S512, 12, "Z12") + Z13 = vec(S512, 13, "Z13") + Z14 = vec(S512, 14, "Z14") + Z15 = vec(S512, 15, "Z15") + Z16 = vec(S512, 16, "Z16") + Z17 = vec(S512, 17, "Z17") + Z18 = vec(S512, 18, "Z18") + Z19 = vec(S512, 19, "Z19") + Z20 = vec(S512, 20, "Z20") + Z21 = vec(S512, 21, "Z21") + Z22 = vec(S512, 22, "Z22") + Z23 = vec(S512, 23, "Z23") + Z24 = vec(S512, 24, "Z24") + Z25 = vec(S512, 25, "Z25") + Z26 = vec(S512, 26, "Z26") + Z27 = vec(S512, 27, "Z27") + Z28 = vec(S512, 28, "Z28") + Z29 = vec(S512, 29, "Z29") + Z30 = vec(S512, 30, "Z30") + Z31 = vec(S512, 31, "Z31") +) diff --git a/vendor/github.com/mmcloughlin/avo/src/src.go b/vendor/github.com/mmcloughlin/avo/src/src.go new file mode 100644 index 00000000..3a47886e --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/src/src.go @@ -0,0 
+1,62 @@ +// Package src provides types for working with source files. +package src + +import ( + "os" + "path/filepath" + "runtime" + "strconv" +) + +// Position represents a position in a source file. +type Position struct { + Filename string + Line int // 1-up +} + +// FramePosition returns the Position of the given stack frame. +func FramePosition(f runtime.Frame) Position { + return Position{ + Filename: f.File, + Line: f.Line, + } +} + +// IsValid reports whether the position is valid: Line must be positive, but +// Filename may be empty. +func (p Position) IsValid() bool { + return p.Line > 0 +} + +// String represents Position as a string. +func (p Position) String() string { + if !p.IsValid() { + return "-" + } + var s string + if p.Filename != "" { + s += p.Filename + ":" + } + s += strconv.Itoa(p.Line) + return s +} + +// Rel returns Position relative to basepath. If the given filename cannot be +// expressed relative to basepath the position will be returned unchanged. +func (p Position) Rel(basepath string) Position { + q := p + if rel, err := filepath.Rel(basepath, q.Filename); err == nil { + q.Filename = rel + } + return q +} + +// Relwd returns Position relative to the current working directory. Returns p +// unchanged if the working directory cannot be determined, or the filename +// cannot be expressed relative to the working directory. +func (p Position) Relwd() Position { + if wd, err := os.Getwd(); err == nil { + return p.Rel(wd) + } + return p +} diff --git a/vendor/github.com/mmcloughlin/avo/x86/doc.go b/vendor/github.com/mmcloughlin/avo/x86/doc.go new file mode 100644 index 00000000..6e4c8ee8 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/x86/doc.go @@ -0,0 +1,2 @@ +// Package x86 provides constructors for all x86-64 instructions. +package x86 diff --git a/vendor/github.com/mmcloughlin/avo/x86/gen.go b/vendor/github.com/mmcloughlin/avo/x86/gen.go new file mode 100644 index 00000000..25d15fa6 --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/x86/gen.go @@ -0,0 +1,4 @@ +package x86 + +//go:generate avogen -output zctors.go ctors +//go:generate avogen -output zctors_test.go ctorstest diff --git a/vendor/github.com/mmcloughlin/avo/x86/zctors.go b/vendor/github.com/mmcloughlin/avo/x86/zctors.go new file mode 100644 index 00000000..3ef1d2fd --- /dev/null +++ b/vendor/github.com/mmcloughlin/avo/x86/zctors.go @@ -0,0 +1,32576 @@ +// Code generated by command: avogen -output zctors.go ctors. DO NOT EDIT. + +package x86 + +import ( + "errors" + intrep "github.com/mmcloughlin/avo/ir" + "github.com/mmcloughlin/avo/operand" + "github.com/mmcloughlin/avo/reg" +) + +// ADCB: Add with Carry. 
+// +// Forms: +// +// ADCB imm8 al +// ADCB imm8 r8 +// ADCB r8 r8 +// ADCB m8 r8 +// ADCB imm8 m8 +// ADCB r8 m8 +func ADCB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "ADCB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ADCB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ADCB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ADCB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ADCB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ADCB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ADCB: bad operands") +} + +// ADCL: Add with Carry. +// +// Forms: +// +// ADCL imm32 eax +// ADCL imm8 r32 +// ADCL imm32 r32 +// ADCL r32 r32 +// ADCL m32 r32 +// ADCL imm8 m32 +// ADCL imm32 m32 +// ADCL r32 m32 +func ADCL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ADCL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("ADCL: bad operands") +} + +// ADCQ: Add with Carry. 
+// +// Forms: +// +// ADCQ imm32 rax +// ADCQ imm8 r64 +// ADCQ imm32 r64 +// ADCQ r64 r64 +// ADCQ m64 r64 +// ADCQ imm8 m64 +// ADCQ imm32 m64 +// ADCQ r64 m64 +func ADCQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ADCQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ADCQ: bad operands") +} + +// ADCW: Add with Carry. 
+// +// Forms: +// +// ADCW imm16 ax +// ADCW imm8 r16 +// ADCW imm16 r16 +// ADCW r16 r16 +// ADCW m16 r16 +// ADCW imm8 m16 +// ADCW imm16 m16 +// ADCW r16 m16 +func ADCW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ADCW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ADCW: bad operands") +} + +// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag. +// +// Forms: +// +// ADCXL r32 r32 +// ADCXL m32 r32 +func ADCXL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "ADCXL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "ADCXL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("ADCXL: bad operands") +} + +// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag. +// +// Forms: +// +// ADCXQ r64 r64 +// ADCXQ m64 r64 +func ADCXQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "ADCXQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "ADCXQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("ADCXQ: bad operands") +} + +// ADDB: Add. 
+// +// Forms: +// +// ADDB imm8 al +// ADDB imm8 r8 +// ADDB r8 r8 +// ADDB m8 r8 +// ADDB imm8 m8 +// ADDB r8 m8 +func ADDB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "ADDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ADDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ADDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ADDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ADDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ADDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ADDB: bad operands") +} + +// ADDL: Add. +// +// Forms: +// +// ADDL imm32 eax +// ADDL imm8 r32 +// ADDL imm32 r32 +// ADDL r32 r32 +// ADDL m32 r32 +// ADDL imm8 m32 +// ADDL imm32 m32 +// ADDL r32 m32 +func ADDL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ADDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("ADDL: bad operands") +} + +// ADDPD: Add Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// ADDPD xmm xmm +// ADDPD m128 xmm +func ADDPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ADDPD: bad operands") +} + +// ADDPS: Add Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ADDPS xmm xmm +// ADDPS m128 xmm +func ADDPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ADDPS: bad operands") +} + +// ADDQ: Add. +// +// Forms: +// +// ADDQ imm32 rax +// ADDQ imm8 r64 +// ADDQ imm32 r64 +// ADDQ r64 r64 +// ADDQ m64 r64 +// ADDQ imm8 m64 +// ADDQ imm32 m64 +// ADDQ r64 m64 +func ADDQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ADDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ADDQ: bad operands") +} + +// ADDSD: Add Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// ADDSD xmm xmm +// ADDSD m64 xmm +func ADDSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ADDSD: bad operands") +} + +// ADDSS: Add Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// ADDSS xmm xmm +// ADDSS m32 xmm +func ADDSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ADDSS: bad operands") +} + +// ADDSUBPD: Packed Double-FP Add/Subtract. +// +// Forms: +// +// ADDSUBPD xmm xmm +// ADDSUBPD m128 xmm +func ADDSUBPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSUBPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSUBPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ADDSUBPD: bad operands") +} + +// ADDSUBPS: Packed Single-FP Add/Subtract. +// +// Forms: +// +// ADDSUBPS xmm xmm +// ADDSUBPS m128 xmm +func ADDSUBPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSUBPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ADDSUBPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ADDSUBPS: bad operands") +} + +// ADDW: Add. 
+// +// Forms: +// +// ADDW imm16 ax +// ADDW imm8 r16 +// ADDW imm16 r16 +// ADDW r16 r16 +// ADDW m16 r16 +// ADDW imm8 m16 +// ADDW imm16 m16 +// ADDW r16 m16 +func ADDW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ADDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ADDW: bad operands") +} + +// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag. +// +// Forms: +// +// ADOXL r32 r32 +// ADOXL m32 r32 +func ADOXL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "ADOXL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "ADOXL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("ADOXL: bad operands") +} + +// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag. +// +// Forms: +// +// ADOXQ r64 r64 +// ADOXQ m64 r64 +func ADOXQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "ADOXQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "ADOXQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("ADOXQ: bad operands") +} + +// AESDEC: Perform One Round of an AES Decryption Flow. 
+// +// Forms: +// +// AESDEC xmm xmm +// AESDEC m128 xmm +func AESDEC(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESDEC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESDEC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("AESDEC: bad operands") +} + +// AESDECLAST: Perform Last Round of an AES Decryption Flow. +// +// Forms: +// +// AESDECLAST xmm xmm +// AESDECLAST m128 xmm +func AESDECLAST(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESDECLAST", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESDECLAST", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("AESDECLAST: bad operands") +} + +// AESENC: Perform One Round of an AES Encryption Flow. +// +// Forms: +// +// AESENC xmm xmm +// AESENC m128 xmm +func AESENC(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESENC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESENC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("AESENC: bad operands") +} + +// AESENCLAST: Perform Last Round of an AES Encryption Flow. +// +// Forms: +// +// AESENCLAST xmm xmm +// AESENCLAST m128 xmm +func AESENCLAST(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESENCLAST", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESENCLAST", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("AESENCLAST: bad operands") +} + +// AESIMC: Perform the AES InvMixColumn Transformation. +// +// Forms: +// +// AESIMC xmm xmm +// AESIMC m128 xmm +func AESIMC(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESIMC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESIMC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("AESIMC: bad operands") +} + +// AESKEYGENASSIST: AES Round Key Generation Assist. 
+// +// Forms: +// +// AESKEYGENASSIST imm8 xmm xmm +// AESKEYGENASSIST imm8 m128 xmm +func AESKEYGENASSIST(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESKEYGENASSIST", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "AESKEYGENASSIST", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("AESKEYGENASSIST: bad operands") +} + +// ANDB: Logical AND. +// +// Forms: +// +// ANDB imm8 al +// ANDB imm8 r8 +// ANDB r8 r8 +// ANDB m8 r8 +// ANDB imm8 m8 +// ANDB r8 m8 +func ANDB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "ANDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ANDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ANDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ANDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ANDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ANDB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ANDB: bad operands") +} + +// ANDL: Logical AND. 
+// +// Forms: +// +// ANDL imm32 eax +// ANDL imm8 r32 +// ANDL imm32 r32 +// ANDL r32 r32 +// ANDL m32 r32 +// ANDL imm8 m32 +// ANDL imm32 m32 +// ANDL r32 m32 +func ANDL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ANDL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("ANDL: bad operands") +} + +// ANDNL: Logical AND NOT. +// +// Forms: +// +// ANDNL r32 r32 r32 +// ANDNL m32 r32 r32 +func ANDNL(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "ANDNL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "ANDNL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("ANDNL: bad operands") +} + +// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ANDNPD xmm xmm +// ANDNPD m128 xmm +func ANDNPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDNPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDNPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ANDNPD: bad operands") +} + +// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// ANDNPS xmm xmm +// ANDNPS m128 xmm +func ANDNPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDNPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDNPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ANDNPS: bad operands") +} + +// ANDNQ: Logical AND NOT. +// +// Forms: +// +// ANDNQ r64 r64 r64 +// ANDNQ m64 r64 r64 +func ANDNQ(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "ANDNQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "ANDNQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("ANDNQ: bad operands") +} + +// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// ANDPD xmm xmm +// ANDPD m128 xmm +func ANDPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ANDPD: bad operands") +} + +// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// ANDPS xmm xmm +// ANDPS m128 xmm +func ANDPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ANDPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ANDPS: bad operands") +} + +// ANDQ: Logical AND. 
+// +// Forms: +// +// ANDQ imm32 rax +// ANDQ imm8 r64 +// ANDQ imm32 r64 +// ANDQ r64 r64 +// ANDQ m64 r64 +// ANDQ imm8 m64 +// ANDQ imm32 m64 +// ANDQ r64 m64 +func ANDQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ANDQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ANDQ: bad operands") +} + +// ANDW: Logical AND. 
+// +// Forms: +// +// ANDW imm16 ax +// ANDW imm8 r16 +// ANDW imm16 r16 +// ANDW r16 r16 +// ANDW m16 r16 +// ANDW imm8 m16 +// ANDW imm16 m16 +// ANDW r16 m16 +func ANDW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ANDW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ANDW: bad operands") +} + +// BEXTRL: Bit Field Extract. +// +// Forms: +// +// BEXTRL r32 r32 r32 +// BEXTRL r32 m32 r32 +func BEXTRL(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "BEXTRL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "BEXTRL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("BEXTRL: bad operands") +} + +// BEXTRQ: Bit Field Extract. +// +// Forms: +// +// BEXTRQ r64 r64 r64 +// BEXTRQ r64 m64 r64 +func BEXTRQ(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "BEXTRQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "BEXTRQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("BEXTRQ: bad operands") +} + +// BLENDPD: Blend Packed Double Precision Floating-Point Values. 
+// +// Forms: +// +// BLENDPD imm8 xmm xmm +// BLENDPD imm8 m128 xmm +func BLENDPD(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "BLENDPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "BLENDPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("BLENDPD: bad operands") +} + +// BLENDPS: Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// BLENDPS imm8 xmm xmm +// BLENDPS imm8 m128 xmm +func BLENDPS(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "BLENDPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "BLENDPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("BLENDPS: bad operands") +} + +// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values. +// +// Forms: +// +// BLENDVPD xmm0 xmm xmm +// BLENDVPD xmm0 m128 xmm +func BLENDVPD(x, mx, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM0(x) && operand.IsXMM(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "BLENDVPD", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsXMM0(x) && operand.IsM128(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "BLENDVPD", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("BLENDVPD: bad operands") +} + +// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// BLENDVPS xmm0 xmm xmm +// BLENDVPS xmm0 m128 xmm +func BLENDVPS(x, mx, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM0(x) && operand.IsXMM(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "BLENDVPS", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsXMM0(x) && operand.IsM128(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "BLENDVPS", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("BLENDVPS: bad operands") +} + +// BLSIL: Isolate Lowest Set Bit. +// +// Forms: +// +// BLSIL r32 r32 +// BLSIL m32 r32 +func BLSIL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BLSIL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BLSIL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BLSIL: bad operands") +} + +// BLSIQ: Isolate Lowest Set Bit. 
+// +// Forms: +// +// BLSIQ r64 r64 +// BLSIQ m64 r64 +func BLSIQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BLSIQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BLSIQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BLSIQ: bad operands") +} + +// BLSMSKL: Mask From Lowest Set Bit. +// +// Forms: +// +// BLSMSKL r32 r32 +// BLSMSKL m32 r32 +func BLSMSKL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BLSMSKL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BLSMSKL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BLSMSKL: bad operands") +} + +// BLSMSKQ: Mask From Lowest Set Bit. +// +// Forms: +// +// BLSMSKQ r64 r64 +// BLSMSKQ m64 r64 +func BLSMSKQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BLSMSKQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BLSMSKQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BLSMSKQ: bad operands") +} + +// BLSRL: Reset Lowest Set Bit. +// +// Forms: +// +// BLSRL r32 r32 +// BLSRL m32 r32 +func BLSRL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BLSRL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BLSRL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BLSRL: bad operands") +} + +// BLSRQ: Reset Lowest Set Bit. +// +// Forms: +// +// BLSRQ r64 r64 +// BLSRQ m64 r64 +func BLSRQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BLSRQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BLSRQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BLSRQ: bad operands") +} + +// BSFL: Bit Scan Forward. 
+// +// Forms: +// +// BSFL r32 r32 +// BSFL m32 r32 +func BSFL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BSFL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BSFL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSFL: bad operands") +} + +// BSFQ: Bit Scan Forward. +// +// Forms: +// +// BSFQ r64 r64 +// BSFQ m64 r64 +func BSFQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BSFQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BSFQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSFQ: bad operands") +} + +// BSFW: Bit Scan Forward. +// +// Forms: +// +// BSFW r16 r16 +// BSFW m16 r16 +func BSFW(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "BSFW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "BSFW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSFW: bad operands") +} + +// BSRL: Bit Scan Reverse. +// +// Forms: +// +// BSRL r32 r32 +// BSRL m32 r32 +func BSRL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BSRL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BSRL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSRL: bad operands") +} + +// BSRQ: Bit Scan Reverse. +// +// Forms: +// +// BSRQ r64 r64 +// BSRQ m64 r64 +func BSRQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BSRQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BSRQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSRQ: bad operands") +} + +// BSRW: Bit Scan Reverse. 
+// +// Forms: +// +// BSRW r16 r16 +// BSRW m16 r16 +func BSRW(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "BSRW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "BSRW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSRW: bad operands") +} + +// BSWAPL: Byte Swap. +// +// Forms: +// +// BSWAPL r32 +func BSWAPL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "BSWAPL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSWAPL: bad operands") +} + +// BSWAPQ: Byte Swap. +// +// Forms: +// +// BSWAPQ r64 +func BSWAPQ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "BSWAPQ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("BSWAPQ: bad operands") +} + +// BTCL: Bit Test and Complement. +// +// Forms: +// +// BTCL imm8 r32 +// BTCL r32 r32 +// BTCL imm8 m32 +// BTCL r32 m32 +func BTCL(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTCL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTCL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTCL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTCL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTCL: bad operands") +} + +// BTCQ: Bit Test and Complement. +// +// Forms: +// +// BTCQ imm8 r64 +// BTCQ r64 r64 +// BTCQ imm8 m64 +// BTCQ r64 m64 +func BTCQ(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTCQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTCQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTCQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTCQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTCQ: bad operands") +} + +// BTCW: Bit Test and Complement. 
+// +// Forms: +// +// BTCW imm8 r16 +// BTCW r16 r16 +// BTCW imm8 m16 +// BTCW r16 m16 +func BTCW(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTCW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTCW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTCW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTCW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTCW: bad operands") +} + +// BTL: Bit Test. +// +// Forms: +// +// BTL imm8 r32 +// BTL r32 r32 +// BTL imm8 m32 +// BTL r32 m32 +func BTL(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM8(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("BTL: bad operands") +} + +// BTQ: Bit Test. +// +// Forms: +// +// BTQ imm8 r64 +// BTQ r64 r64 +// BTQ imm8 m64 +// BTQ r64 m64 +func BTQ(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM8(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("BTQ: bad operands") +} + +// BTRL: Bit Test and Reset. 
+// +// Forms: +// +// BTRL imm8 r32 +// BTRL r32 r32 +// BTRL imm8 m32 +// BTRL r32 m32 +func BTRL(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTRL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTRL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTRL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTRL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTRL: bad operands") +} + +// BTRQ: Bit Test and Reset. +// +// Forms: +// +// BTRQ imm8 r64 +// BTRQ r64 r64 +// BTRQ imm8 m64 +// BTRQ r64 m64 +func BTRQ(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTRQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTRQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTRQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTRQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTRQ: bad operands") +} + +// BTRW: Bit Test and Reset. +// +// Forms: +// +// BTRW imm8 r16 +// BTRW r16 r16 +// BTRW imm8 m16 +// BTRW r16 m16 +func BTRW(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTRW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTRW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTRW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTRW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTRW: bad operands") +} + +// BTSL: Bit Test and Set. 
+// +// Forms: +// +// BTSL imm8 r32 +// BTSL r32 r32 +// BTSL imm8 m32 +// BTSL r32 m32 +func BTSL(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTSL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(ir) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "BTSL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTSL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(ir) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "BTSL", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTSL: bad operands") +} + +// BTSQ: Bit Test and Set. +// +// Forms: +// +// BTSQ imm8 r64 +// BTSQ r64 r64 +// BTSQ imm8 m64 +// BTSQ r64 m64 +func BTSQ(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTSQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "BTSQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTSQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "BTSQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTSQ: bad operands") +} + +// BTSW: Bit Test and Set. +// +// Forms: +// +// BTSW imm8 r16 +// BTSW r16 r16 +// BTSW imm8 m16 +// BTSW r16 m16 +func BTSW(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTSW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTSW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTSW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTSW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("BTSW: bad operands") +} + +// BTW: Bit Test. 
+// +// Forms: +// +// BTW imm8 r16 +// BTW r16 r16 +// BTW imm8 m16 +// BTW r16 m16 +func BTW(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(ir) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "BTW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM8(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(ir) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "BTW", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("BTW: bad operands") +} + +// BZHIL: Zero High Bits Starting with Specified Bit Position. +// +// Forms: +// +// BZHIL r32 r32 r32 +// BZHIL r32 m32 r32 +func BZHIL(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "BZHIL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "BZHIL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("BZHIL: bad operands") +} + +// BZHIQ: Zero High Bits Starting with Specified Bit Position. +// +// Forms: +// +// BZHIQ r64 r64 r64 +// BZHIQ r64 m64 r64 +func BZHIQ(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "BZHIQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "BZHIQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("BZHIQ: bad operands") +} + +// CALL: Call Procedure. +// +// Forms: +// +// CALL rel32 +func CALL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "CALL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CALL: bad operands") +} + +// CBW: Convert Byte to Word. +// +// Forms: +// +// CBW +func CBW() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CBW", + Operands: nil, + Inputs: []operand.Op{reg.AL}, + Outputs: []operand.Op{reg.AX}, + }, nil +} + +// CDQ: Convert Doubleword to Quadword. +// +// Forms: +// +// CDQ +func CDQ() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CDQ", + Operands: nil, + Inputs: []operand.Op{reg.EAX}, + Outputs: []operand.Op{reg.EDX}, + }, nil +} + +// CDQE: Convert Doubleword to Quadword. +// +// Forms: +// +// CDQE +func CDQE() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CDQE", + Operands: nil, + Inputs: []operand.Op{reg.EAX}, + Outputs: []operand.Op{reg.RAX}, + }, nil +} + +// CLC: Clear Carry Flag. 
+// +// Forms: +// +// CLC +func CLC() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CLC", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// CLD: Clear Direction Flag. +// +// Forms: +// +// CLD +func CLD() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CLD", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// CLFLUSH: Flush Cache Line. +// +// Forms: +// +// CLFLUSH m8 +func CLFLUSH(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM8(m): + return &intrep.Instruction{ + Opcode: "CLFLUSH", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CLFLUSH: bad operands") +} + +// CLFLUSHOPT: Flush Cache Line Optimized. +// +// Forms: +// +// CLFLUSHOPT m8 +func CLFLUSHOPT(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM8(m): + return &intrep.Instruction{ + Opcode: "CLFLUSHOPT", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CLFLUSHOPT: bad operands") +} + +// CMC: Complement Carry Flag. +// +// Forms: +// +// CMC +func CMC() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CMC", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// CMOVLCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVLCC r32 r32 +// CMOVLCC m32 r32 +func CMOVLCC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLCC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLCC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLCC: bad operands") +} + +// CMOVLCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVLCS r32 r32 +// CMOVLCS m32 r32 +func CMOVLCS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLCS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLCS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLCS: bad operands") +} + +// CMOVLEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVLEQ r32 r32 +// CMOVLEQ m32 r32 +func CMOVLEQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLEQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLEQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLEQ: bad operands") +} + +// CMOVLGE: Move if greater or equal (SF == OF). 
+// +// Forms: +// +// CMOVLGE r32 r32 +// CMOVLGE m32 r32 +func CMOVLGE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLGE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLGE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLGE: bad operands") +} + +// CMOVLGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVLGT r32 r32 +// CMOVLGT m32 r32 +func CMOVLGT(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLGT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLGT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLGT: bad operands") +} + +// CMOVLHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVLHI r32 r32 +// CMOVLHI m32 r32 +func CMOVLHI(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLHI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLHI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLHI: bad operands") +} + +// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVLLE r32 r32 +// CMOVLLE m32 r32 +func CMOVLLE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLLE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLLE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLLE: bad operands") +} + +// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVLLS r32 r32 +// CMOVLLS m32 r32 +func CMOVLLS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLLS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLLS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLLS: bad operands") +} + +// CMOVLLT: Move if less (SF != OF). 
+// +// Forms: +// +// CMOVLLT r32 r32 +// CMOVLLT m32 r32 +func CMOVLLT(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLLT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLLT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLLT: bad operands") +} + +// CMOVLMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVLMI r32 r32 +// CMOVLMI m32 r32 +func CMOVLMI(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLMI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLMI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLMI: bad operands") +} + +// CMOVLNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVLNE r32 r32 +// CMOVLNE m32 r32 +func CMOVLNE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLNE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLNE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLNE: bad operands") +} + +// CMOVLOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVLOC r32 r32 +// CMOVLOC m32 r32 +func CMOVLOC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLOC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLOC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLOC: bad operands") +} + +// CMOVLOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVLOS r32 r32 +// CMOVLOS m32 r32 +func CMOVLOS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLOS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLOS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLOS: bad operands") +} + +// CMOVLPC: Move if not parity (PF == 0). 
+// +// Forms: +// +// CMOVLPC r32 r32 +// CMOVLPC m32 r32 +func CMOVLPC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLPC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLPC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLPC: bad operands") +} + +// CMOVLPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVLPL r32 r32 +// CMOVLPL m32 r32 +func CMOVLPL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLPL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLPL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLPL: bad operands") +} + +// CMOVLPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVLPS r32 r32 +// CMOVLPS m32 r32 +func CMOVLPS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLPS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CMOVLPS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVLPS: bad operands") +} + +// CMOVQCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVQCC r64 r64 +// CMOVQCC m64 r64 +func CMOVQCC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQCC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQCC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQCC: bad operands") +} + +// CMOVQCS: Move if below (CF == 1). +// +// Forms: +// +// CMOVQCS r64 r64 +// CMOVQCS m64 r64 +func CMOVQCS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQCS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQCS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQCS: bad operands") +} + +// CMOVQEQ: Move if equal (ZF == 1). 
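+//
+// Usage sketch (editorial addition, not emitted by the avo generator): every
+// CMOV* constructor in this file follows the same two-operand pattern, e.g. a
+// 64-bit conditional move of RBX into RAX when ZF is set:
+//
+//	inst, err := CMOVQEQ(reg.RBX, reg.RAX)
+//	if err != nil {
+//		// operands did not match r64/m64, r64
+//	}
+//	_ = inst
+//
+// The CMOVL* and CMOVW* variants differ only in requiring 32-bit and 16-bit
+// operands respectively.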
+// +// Forms: +// +// CMOVQEQ r64 r64 +// CMOVQEQ m64 r64 +func CMOVQEQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQEQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQEQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQEQ: bad operands") +} + +// CMOVQGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVQGE r64 r64 +// CMOVQGE m64 r64 +func CMOVQGE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQGE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQGE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQGE: bad operands") +} + +// CMOVQGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVQGT r64 r64 +// CMOVQGT m64 r64 +func CMOVQGT(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQGT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQGT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQGT: bad operands") +} + +// CMOVQHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVQHI r64 r64 +// CMOVQHI m64 r64 +func CMOVQHI(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQHI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQHI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQHI: bad operands") +} + +// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// CMOVQLE r64 r64 +// CMOVQLE m64 r64 +func CMOVQLE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQLE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQLE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQLE: bad operands") +} + +// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1). 
+// +// Forms: +// +// CMOVQLS r64 r64 +// CMOVQLS m64 r64 +func CMOVQLS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQLS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQLS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQLS: bad operands") +} + +// CMOVQLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVQLT r64 r64 +// CMOVQLT m64 r64 +func CMOVQLT(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQLT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQLT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQLT: bad operands") +} + +// CMOVQMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVQMI r64 r64 +// CMOVQMI m64 r64 +func CMOVQMI(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQMI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQMI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQMI: bad operands") +} + +// CMOVQNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVQNE r64 r64 +// CMOVQNE m64 r64 +func CMOVQNE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQNE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQNE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQNE: bad operands") +} + +// CMOVQOC: Move if not overflow (OF == 0). +// +// Forms: +// +// CMOVQOC r64 r64 +// CMOVQOC m64 r64 +func CMOVQOC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQOC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQOC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQOC: bad operands") +} + +// CMOVQOS: Move if overflow (OF == 1). 
+// +// Forms: +// +// CMOVQOS r64 r64 +// CMOVQOS m64 r64 +func CMOVQOS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQOS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQOS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQOS: bad operands") +} + +// CMOVQPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVQPC r64 r64 +// CMOVQPC m64 r64 +func CMOVQPC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQPC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQPC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQPC: bad operands") +} + +// CMOVQPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVQPL r64 r64 +// CMOVQPL m64 r64 +func CMOVQPL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQPL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQPL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQPL: bad operands") +} + +// CMOVQPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVQPS r64 r64 +// CMOVQPS m64 r64 +func CMOVQPS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQPS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CMOVQPS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVQPS: bad operands") +} + +// CMOVWCC: Move if above or equal (CF == 0). +// +// Forms: +// +// CMOVWCC r16 r16 +// CMOVWCC m16 r16 +func CMOVWCC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWCC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWCC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWCC: bad operands") +} + +// CMOVWCS: Move if below (CF == 1). 
+// +// Forms: +// +// CMOVWCS r16 r16 +// CMOVWCS m16 r16 +func CMOVWCS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWCS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWCS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWCS: bad operands") +} + +// CMOVWEQ: Move if equal (ZF == 1). +// +// Forms: +// +// CMOVWEQ r16 r16 +// CMOVWEQ m16 r16 +func CMOVWEQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWEQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWEQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWEQ: bad operands") +} + +// CMOVWGE: Move if greater or equal (SF == OF). +// +// Forms: +// +// CMOVWGE r16 r16 +// CMOVWGE m16 r16 +func CMOVWGE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWGE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWGE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWGE: bad operands") +} + +// CMOVWGT: Move if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// CMOVWGT r16 r16 +// CMOVWGT m16 r16 +func CMOVWGT(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWGT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWGT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWGT: bad operands") +} + +// CMOVWHI: Move if above (CF == 0 and ZF == 0). +// +// Forms: +// +// CMOVWHI r16 r16 +// CMOVWHI m16 r16 +func CMOVWHI(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWHI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWHI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWHI: bad operands") +} + +// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF). 
+// +// Forms: +// +// CMOVWLE r16 r16 +// CMOVWLE m16 r16 +func CMOVWLE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWLE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWLE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWLE: bad operands") +} + +// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// CMOVWLS r16 r16 +// CMOVWLS m16 r16 +func CMOVWLS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWLS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWLS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWLS: bad operands") +} + +// CMOVWLT: Move if less (SF != OF). +// +// Forms: +// +// CMOVWLT r16 r16 +// CMOVWLT m16 r16 +func CMOVWLT(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWLT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWLT", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWLT: bad operands") +} + +// CMOVWMI: Move if sign (SF == 1). +// +// Forms: +// +// CMOVWMI r16 r16 +// CMOVWMI m16 r16 +func CMOVWMI(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWMI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWMI", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWMI: bad operands") +} + +// CMOVWNE: Move if not equal (ZF == 0). +// +// Forms: +// +// CMOVWNE r16 r16 +// CMOVWNE m16 r16 +func CMOVWNE(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWNE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWNE", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWNE: bad operands") +} + +// CMOVWOC: Move if not overflow (OF == 0). 
+// +// Forms: +// +// CMOVWOC r16 r16 +// CMOVWOC m16 r16 +func CMOVWOC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWOC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWOC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWOC: bad operands") +} + +// CMOVWOS: Move if overflow (OF == 1). +// +// Forms: +// +// CMOVWOS r16 r16 +// CMOVWOS m16 r16 +func CMOVWOS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWOS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWOS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWOS: bad operands") +} + +// CMOVWPC: Move if not parity (PF == 0). +// +// Forms: +// +// CMOVWPC r16 r16 +// CMOVWPC m16 r16 +func CMOVWPC(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWPC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWPC", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWPC: bad operands") +} + +// CMOVWPL: Move if not sign (SF == 0). +// +// Forms: +// +// CMOVWPL r16 r16 +// CMOVWPL m16 r16 +func CMOVWPL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWPL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWPL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWPL: bad operands") +} + +// CMOVWPS: Move if parity (PF == 1). +// +// Forms: +// +// CMOVWPS r16 r16 +// CMOVWPS m16 r16 +func CMOVWPS(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWPS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "CMOVWPS", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CMOVWPS: bad operands") +} + +// CMPB: Compare Two Operands. 
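+//
+// Usage sketch (editorial addition, not emitted by the avo generator): the
+// integer compares (CMPB/CMPW/CMPL/CMPQ) only read their operands and set
+// flags, which is why they report no Outputs. Comparing AL against an 8-bit
+// immediate, assuming operand.Imm is the avo immediate constructor:
+//
+//	inst, err := CMPB(reg.AL, operand.Imm(10))
+//	if err != nil {
+//		// operands did not match any CMPB form
+//	}
+//	_ = inst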
+// +// Forms: +// +// CMPB al imm8 +// CMPB r8 imm8 +// CMPB r8 r8 +// CMPB r8 m8 +// CMPB m8 imm8 +// CMPB m8 r8 +func CMPB(amr, imr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsAL(amr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPB", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR8(amr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPB", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR8(amr) && operand.IsR8(imr): + return &intrep.Instruction{ + Opcode: "CMPB", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR8(amr) && operand.IsM8(imr): + return &intrep.Instruction{ + Opcode: "CMPB", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM8(amr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPB", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM8(amr) && operand.IsR8(imr): + return &intrep.Instruction{ + Opcode: "CMPB", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr, imr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CMPB: bad operands") +} + +// CMPL: Compare Two Operands. +// +// Forms: +// +// CMPL eax imm32 +// CMPL r32 imm8 +// CMPL r32 imm32 +// CMPL r32 r32 +// CMPL r32 m32 +// CMPL m32 imm8 +// CMPL m32 imm32 +// CMPL m32 r32 +func CMPL(emr, imr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsEAX(emr) && operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(emr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(emr) && operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(emr) && operand.IsR32(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(emr) && operand.IsM32(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(emr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(emr) && operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(emr) && operand.IsR32(imr): + return &intrep.Instruction{ + Opcode: "CMPL", + Operands: []operand.Op{emr, imr}, + Inputs: []operand.Op{emr, imr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CMPL: bad operands") +} + +// CMPPD: Compare Packed Double-Precision Floating-Point Values. 
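+//
+// Usage sketch (editorial addition, not emitted by the avo generator): the
+// packed and scalar SSE compares take a trailing imm8 predicate (0 selects
+// "equal"). Assuming reg.X0/reg.X1 name the XMM registers and operand.Imm
+// builds immediates:
+//
+//	inst, err := CMPPD(reg.X1, reg.X0, operand.Imm(0))
+//	if err != nil {
+//		// operands did not match xmm/m128, xmm, imm8
+//	}
+//	_ = inst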
+// +// Forms: +// +// CMPPD xmm xmm imm8 +// CMPPD m128 xmm imm8 +func CMPPD(mx, x, i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPPD", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPPD", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CMPPD: bad operands") +} + +// CMPPS: Compare Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// CMPPS xmm xmm imm8 +// CMPPS m128 xmm imm8 +func CMPPS(mx, x, i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPPS", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPPS", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CMPPS: bad operands") +} + +// CMPQ: Compare Two Operands. +// +// Forms: +// +// CMPQ rax imm32 +// CMPQ r64 imm8 +// CMPQ r64 imm32 +// CMPQ r64 r64 +// CMPQ r64 m64 +// CMPQ m64 imm8 +// CMPQ m64 imm32 +// CMPQ m64 r64 +func CMPQ(mr, imr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsRAX(mr) && operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(mr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(mr) && operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(mr) && operand.IsR64(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(mr) && operand.IsM64(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mr) && operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mr) && operand.IsR64(imr): + return &intrep.Instruction{ + Opcode: "CMPQ", + Operands: []operand.Op{mr, imr}, + Inputs: []operand.Op{mr, imr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CMPQ: bad operands") +} + +// CMPSD: Compare Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// CMPSD xmm xmm imm8 +// CMPSD m64 xmm imm8 +func CMPSD(mx, x, i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPSD", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPSD", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CMPSD: bad operands") +} + +// CMPSS: Compare Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// CMPSS xmm xmm imm8 +// CMPSS m32 xmm imm8 +func CMPSS(mx, x, i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPSS", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "CMPSS", + Operands: []operand.Op{mx, x, i}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CMPSS: bad operands") +} + +// CMPW: Compare Two Operands. +// +// Forms: +// +// CMPW ax imm16 +// CMPW r16 imm8 +// CMPW r16 imm16 +// CMPW r16 r16 +// CMPW r16 m16 +// CMPW m16 imm8 +// CMPW m16 imm16 +// CMPW m16 r16 +func CMPW(amr, imr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsAX(amr) && operand.IsIMM16(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(amr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(amr) && operand.IsIMM16(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(amr) && operand.IsR16(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(amr) && operand.IsM16(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr, imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM16(amr) && operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM16(amr) && operand.IsIMM16(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM16(amr) && operand.IsR16(imr): + return &intrep.Instruction{ + Opcode: "CMPW", + Operands: []operand.Op{amr, imr}, + Inputs: []operand.Op{amr, imr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("CMPW: bad operands") +} + +// CMPXCHG16B: Compare and Exchange 16 Bytes. 
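+//
+// Usage sketch (editorial addition, not emitted by the avo generator):
+// CMPXCHG16B takes only a 128-bit memory operand; its implicit RAX/RBX/RCX/RDX
+// uses are recorded in Inputs and Outputs below. Assuming operand.Mem and
+// reg.RDI are the avo memory-operand type and base register:
+//
+//	inst, err := CMPXCHG16B(operand.Mem{Base: reg.RDI})
+//	if err != nil {
+//		// the operand was not an m128
+//	}
+//	_ = inst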
+// +// Forms: +// +// CMPXCHG16B m128 +func CMPXCHG16B(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "CMPXCHG16B", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m, reg.RAX, reg.RBX, reg.RCX, reg.RDX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + } + return nil, errors.New("CMPXCHG16B: bad operands") +} + +// CMPXCHG8B: Compare and Exchange 8 Bytes. +// +// Forms: +// +// CMPXCHG8B m64 +func CMPXCHG8B(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM64(m): + return &intrep.Instruction{ + Opcode: "CMPXCHG8B", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m, reg.EAX, reg.EBX, reg.ECX, reg.EDX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + } + return nil, errors.New("CMPXCHG8B: bad operands") +} + +// CMPXCHGB: Compare and Exchange. +// +// Forms: +// +// CMPXCHGB r8 r8 +// CMPXCHGB r8 m8 +func CMPXCHGB(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(r) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGB", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR8(r) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGB", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("CMPXCHGB: bad operands") +} + +// CMPXCHGL: Compare and Exchange. +// +// Forms: +// +// CMPXCHGL r32 r32 +// CMPXCHGL r32 m32 +func CMPXCHGL(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGL", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGL", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("CMPXCHGL: bad operands") +} + +// CMPXCHGQ: Compare and Exchange. +// +// Forms: +// +// CMPXCHGQ r64 r64 +// CMPXCHGQ r64 m64 +func CMPXCHGQ(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGQ", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGQ", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("CMPXCHGQ: bad operands") +} + +// CMPXCHGW: Compare and Exchange. +// +// Forms: +// +// CMPXCHGW r16 r16 +// CMPXCHGW r16 m16 +func CMPXCHGW(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(r) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGW", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(r) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "CMPXCHGW", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("CMPXCHGW: bad operands") +} + +// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS. 
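+//
+// Usage sketch (editorial addition, not emitted by the avo generator):
+// COMISD/COMISS only update EFLAGS, so both operands are Inputs and there are
+// no Outputs. Assuming reg.X0/reg.X1 name the XMM registers:
+//
+//	inst, err := COMISD(reg.X1, reg.X0)
+//	if err != nil {
+//		// operands did not match xmm/m64, xmm
+//	}
+//	_ = inst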
+// +// Forms: +// +// COMISD xmm xmm +// COMISD m64 xmm +func COMISD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "COMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "COMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("COMISD: bad operands") +} + +// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// COMISS xmm xmm +// COMISS m32 xmm +func COMISS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "COMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "COMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("COMISS: bad operands") +} + +// CPUID: CPU Identification. +// +// Forms: +// +// CPUID +func CPUID() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CPUID", + Operands: nil, + Inputs: []operand.Op{reg.EAX, reg.ECX}, + Outputs: []operand.Op{reg.EAX, reg.EBX, reg.ECX, reg.EDX}, + }, nil +} + +// CQO: Convert Quadword to Octaword. +// +// Forms: +// +// CQO +func CQO() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CQO", + Operands: nil, + Inputs: []operand.Op{reg.RAX}, + Outputs: []operand.Op{reg.RDX}, + }, nil +} + +// CRC32B: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32B r8 r32 +// CRC32B m8 r32 +// CRC32B r8 r64 +// CRC32B m8 r64 +func CRC32B(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CRC32B", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CRC32B", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsR8(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CRC32B", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CRC32B", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CRC32B: bad operands") +} + +// CRC32L: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32L r32 r32 +// CRC32L m32 r32 +func CRC32L(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CRC32L", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CRC32L", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CRC32L: bad operands") +} + +// CRC32Q: Accumulate CRC32 Value. 
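+//
+// Usage sketch (editorial addition, not emitted by the avo generator): the
+// CRC32* constructors list the destination register as both an Input (the
+// running checksum) and an Output. Accumulating a 64-bit chunk from RBX into
+// RAX:
+//
+//	inst, err := CRC32Q(reg.RBX, reg.RAX)
+//	if err != nil {
+//		// operands did not match r64/m64, r64
+//	}
+//	_ = inst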
+// +// Forms: +// +// CRC32Q r64 r64 +// CRC32Q m64 r64 +func CRC32Q(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CRC32Q", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CRC32Q", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CRC32Q: bad operands") +} + +// CRC32W: Accumulate CRC32 Value. +// +// Forms: +// +// CRC32W r16 r32 +// CRC32W m16 r32 +func CRC32W(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CRC32W", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CRC32W", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CRC32W: bad operands") +} + +// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTPD2PL xmm xmm +// CVTPD2PL m128 xmm +func CVTPD2PL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPD2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPD2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTPD2PL: bad operands") +} + +// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// CVTPD2PS xmm xmm +// CVTPD2PS m128 xmm +func CVTPD2PS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPD2PS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPD2PS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTPD2PS: bad operands") +} + +// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values. +// +// Forms: +// +// CVTPL2PD xmm xmm +// CVTPL2PD m64 xmm +func CVTPL2PD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPL2PD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPL2PD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTPL2PD: bad operands") +} + +// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values. 
+// +// Forms: +// +// CVTPL2PS xmm xmm +// CVTPL2PS m128 xmm +func CVTPL2PS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPL2PS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPL2PS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTPL2PS: bad operands") +} + +// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values. +// +// Forms: +// +// CVTPS2PD xmm xmm +// CVTPS2PD m64 xmm +func CVTPS2PD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPS2PD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPS2PD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTPS2PD: bad operands") +} + +// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTPS2PL xmm xmm +// CVTPS2PL m128 xmm +func CVTPS2PL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPS2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTPS2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTPS2PL: bad operands") +} + +// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// CVTSD2SL xmm r32 +// CVTSD2SL m64 r32 +// CVTSD2SL xmm r64 +// CVTSD2SL m64 r64 +func CVTSD2SL(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTSD2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTSD2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTSD2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTSD2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CVTSD2SL: bad operands") +} + +// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value. 
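+//
+// Usage sketch (editorial addition, not emitted by the avo generator): the
+// scalar FP-to-FP conversions merge into the destination register, so it
+// appears in both Inputs and Outputs below. Assuming reg.X0/reg.X1 name the
+// XMM registers:
+//
+//	inst, err := CVTSD2SS(reg.X1, reg.X0)
+//	if err != nil {
+//		// operands did not match xmm/m64, xmm
+//	}
+//	_ = inst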
+// +// Forms: +// +// CVTSD2SS xmm xmm +// CVTSD2SS m64 xmm +func CVTSD2SS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSD2SS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSD2SS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTSD2SS: bad operands") +} + +// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSL2SD r32 xmm +// CVTSL2SD m32 xmm +func CVTSL2SD(mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSL2SD", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSL2SD", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTSL2SD: bad operands") +} + +// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSL2SS r32 xmm +// CVTSL2SS m32 xmm +func CVTSL2SS(mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSL2SS", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSL2SS", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTSL2SS: bad operands") +} + +// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// CVTSQ2SD r64 xmm +// CVTSQ2SD m64 xmm +func CVTSQ2SD(mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSQ2SD", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSQ2SD", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTSQ2SD: bad operands") +} + +// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// CVTSQ2SS r64 xmm +// CVTSQ2SS m64 xmm +func CVTSQ2SS(mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSQ2SS", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSQ2SS", + Operands: []operand.Op{mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTSQ2SS: bad operands") +} + +// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value. 
+// +// Forms: +// +// CVTSS2SD xmm xmm +// CVTSS2SD m32 xmm +func CVTSS2SD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSS2SD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTSS2SD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTSS2SD: bad operands") +} + +// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// CVTSS2SL xmm r32 +// CVTSS2SL m32 r32 +// CVTSS2SL xmm r64 +// CVTSS2SL m32 r64 +func CVTSS2SL(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CVTSS2SL: bad operands") +} + +// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTTPD2PL xmm xmm +// CVTTPD2PL m128 xmm +func CVTTPD2PL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTTPD2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTTPD2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTTPD2PL: bad operands") +} + +// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// CVTTPS2PL xmm xmm +// CVTTPS2PL m128 xmm +func CVTTPS2PL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTTPS2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "CVTTPS2PL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("CVTTPS2PL: bad operands") +} + +// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. 
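+//
+// Usage sketch (editorial addition, not emitted by the avo generator): the
+// truncating FP-to-integer conversions write a plain integer register, so
+// unlike the scalar FP forms the destination is an Output only. Converting
+// XMM0 (assumed to be reg.X0 in avo) to a 32-bit integer in EAX:
+//
+//	inst, err := CVTTSD2SL(reg.X0, reg.EAX)
+//	if err != nil {
+//		// operands did not match xmm/m64, r32
+//	}
+//	_ = inst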
+// +// Forms: +// +// CVTTSD2SL xmm r32 +// CVTTSD2SL m64 r32 +func CVTTSD2SL(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTTSD2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTTSD2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CVTTSD2SL: bad operands") +} + +// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// CVTTSD2SQ xmm r64 +// CVTTSD2SQ m64 r64 +func CVTTSD2SQ(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTTSD2SQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTTSD2SQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CVTTSD2SQ: bad operands") +} + +// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// CVTTSS2SL xmm r32 +// CVTTSS2SL m32 r32 +// CVTTSS2SL xmm r64 +// CVTTSS2SL m32 r64 +func CVTTSS2SL(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "CVTTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "CVTTSS2SL", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("CVTTSS2SL: bad operands") +} + +// CWD: Convert Word to Doubleword. +// +// Forms: +// +// CWD +func CWD() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CWD", + Operands: nil, + Inputs: []operand.Op{reg.AX}, + Outputs: []operand.Op{reg.DX}, + }, nil +} + +// CWDE: Convert Word to Doubleword. +// +// Forms: +// +// CWDE +func CWDE() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "CWDE", + Operands: nil, + Inputs: []operand.Op{reg.AX}, + Outputs: []operand.Op{reg.EAX}, + }, nil +} + +// DECB: Decrement by 1. +// +// Forms: +// +// DECB r8 +// DECB m8 +func DECB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "DECB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "DECB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("DECB: bad operands") +} + +// DECL: Decrement by 1. 
+// +// Forms: +// +// DECL r32 +// DECL m32 +func DECL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "DECL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "DECL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("DECL: bad operands") +} + +// DECQ: Decrement by 1. +// +// Forms: +// +// DECQ r64 +// DECQ m64 +func DECQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "DECQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "DECQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("DECQ: bad operands") +} + +// DECW: Decrement by 1. +// +// Forms: +// +// DECW r16 +// DECW m16 +func DECW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "DECW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "DECW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("DECW: bad operands") +} + +// DIVB: Unsigned Divide. +// +// Forms: +// +// DIVB r8 +// DIVB m8 +func DIVB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "DIVB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX}, + Outputs: []operand.Op{reg.AX}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "DIVB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX}, + Outputs: []operand.Op{reg.AX}, + }, nil + } + return nil, errors.New("DIVB: bad operands") +} + +// DIVL: Unsigned Divide. +// +// Forms: +// +// DIVL r32 +// DIVL m32 +func DIVL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "DIVL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "DIVL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + } + return nil, errors.New("DIVL: bad operands") +} + +// DIVPD: Divide Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// DIVPD xmm xmm +// DIVPD m128 xmm +func DIVPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("DIVPD: bad operands") +} + +// DIVPS: Divide Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// DIVPS xmm xmm +// DIVPS m128 xmm +func DIVPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("DIVPS: bad operands") +} + +// DIVQ: Unsigned Divide. +// +// Forms: +// +// DIVQ r64 +// DIVQ m64 +func DIVQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "DIVQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.RAX, reg.RDX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "DIVQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.RAX, reg.RDX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + } + return nil, errors.New("DIVQ: bad operands") +} + +// DIVSD: Divide Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// DIVSD xmm xmm +// DIVSD m64 xmm +func DIVSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("DIVSD: bad operands") +} + +// DIVSS: Divide Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// DIVSS xmm xmm +// DIVSS m32 xmm +func DIVSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DIVSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("DIVSS: bad operands") +} + +// DIVW: Unsigned Divide. +// +// Forms: +// +// DIVW r16 +// DIVW m16 +func DIVW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "DIVW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX, reg.DX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "DIVW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX, reg.DX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + } + return nil, errors.New("DIVW: bad operands") +} + +// DPPD: Dot Product of Packed Double Precision Floating-Point Values. 
+// +// Forms: +// +// DPPD imm8 xmm xmm +// DPPD imm8 m128 xmm +func DPPD(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DPPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DPPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("DPPD: bad operands") +} + +// DPPS: Dot Product of Packed Single Precision Floating-Point Values. +// +// Forms: +// +// DPPS imm8 xmm xmm +// DPPS imm8 m128 xmm +func DPPS(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DPPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "DPPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("DPPS: bad operands") +} + +// EXTRACTPS: Extract Packed Single Precision Floating-Point Value. +// +// Forms: +// +// EXTRACTPS imm2u xmm r32 +// EXTRACTPS imm2u xmm m32 +func EXTRACTPS(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM2U(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "EXTRACTPS", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM2U(i) && operand.IsXMM(x) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "EXTRACTPS", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("EXTRACTPS: bad operands") +} + +// HADDPD: Packed Double-FP Horizontal Add. +// +// Forms: +// +// HADDPD xmm xmm +// HADDPD m128 xmm +func HADDPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HADDPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HADDPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("HADDPD: bad operands") +} + +// HADDPS: Packed Single-FP Horizontal Add. +// +// Forms: +// +// HADDPS xmm xmm +// HADDPS m128 xmm +func HADDPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HADDPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HADDPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("HADDPS: bad operands") +} + +// HSUBPD: Packed Double-FP Horizontal Subtract. 
+// +// Forms: +// +// HSUBPD xmm xmm +// HSUBPD m128 xmm +func HSUBPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HSUBPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HSUBPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("HSUBPD: bad operands") +} + +// HSUBPS: Packed Single-FP Horizontal Subtract. +// +// Forms: +// +// HSUBPS xmm xmm +// HSUBPS m128 xmm +func HSUBPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HSUBPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "HSUBPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("HSUBPS: bad operands") +} + +// IDIVB: Signed Divide. +// +// Forms: +// +// IDIVB r8 +// IDIVB m8 +func IDIVB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "IDIVB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX}, + Outputs: []operand.Op{reg.AX}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "IDIVB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX}, + Outputs: []operand.Op{reg.AX}, + }, nil + } + return nil, errors.New("IDIVB: bad operands") +} + +// IDIVL: Signed Divide. +// +// Forms: +// +// IDIVL r32 +// IDIVL m32 +func IDIVL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "IDIVL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "IDIVL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + } + return nil, errors.New("IDIVL: bad operands") +} + +// IDIVQ: Signed Divide. +// +// Forms: +// +// IDIVQ r64 +// IDIVQ m64 +func IDIVQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "IDIVQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.RAX, reg.RDX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "IDIVQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.RAX, reg.RDX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + } + return nil, errors.New("IDIVQ: bad operands") +} + +// IDIVW: Signed Divide. 
+// +// Forms: +// +// IDIVW r16 +// IDIVW m16 +func IDIVW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "IDIVW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX, reg.DX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "IDIVW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX, reg.DX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + } + return nil, errors.New("IDIVW: bad operands") +} + +// IMUL3L: Signed Multiply. +// +// Forms: +// +// IMUL3L imm8 r32 r32 +// IMUL3L imm32 r32 r32 +// IMUL3L imm8 m32 r32 +// IMUL3L imm32 m32 r32 +func IMUL3L(i, mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "IMUL3L", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM32(i) && operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "IMUL3L", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "IMUL3L", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM32(i) && operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "IMUL3L", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("IMUL3L: bad operands") +} + +// IMUL3Q: Signed Multiply. +// +// Forms: +// +// IMUL3Q imm8 r64 r64 +// IMUL3Q imm32 r64 r64 +// IMUL3Q imm8 m64 r64 +// IMUL3Q imm32 m64 r64 +func IMUL3Q(i, mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "IMUL3Q", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM32(i) && operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "IMUL3Q", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "IMUL3Q", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM32(i) && operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "IMUL3Q", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("IMUL3Q: bad operands") +} + +// IMUL3W: Signed Multiply. 
+// +// Forms: +// +// IMUL3W imm8 r16 r16 +// IMUL3W imm16 r16 r16 +// IMUL3W imm8 m16 r16 +// IMUL3W imm16 m16 r16 +func IMUL3W(i, mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "IMUL3W", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM16(i) && operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "IMUL3W", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM8(i) && operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "IMUL3W", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM16(i) && operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "IMUL3W", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("IMUL3W: bad operands") +} + +// IMULB: Signed Multiply. +// +// Forms: +// +// IMULB r8 +// IMULB m8 +func IMULB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "IMULB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AL}, + Outputs: []operand.Op{reg.AX}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "IMULB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AL}, + Outputs: []operand.Op{reg.AX}, + }, nil + } + return nil, errors.New("IMULB: bad operands") +} + +// IMULL: Signed Multiply. +// +// Forms: +// +// IMULL r32 +// IMULL m32 +// IMULL r32 r32 +// IMULL m32 r32 +func IMULL(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 1 && operand.IsR32(ops[0]): + return &intrep.Instruction{ + Opcode: "IMULL", + Operands: ops, + Inputs: []operand.Op{ops[0], reg.EAX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + case len(ops) == 1 && operand.IsM32(ops[0]): + return &intrep.Instruction{ + Opcode: "IMULL", + Operands: ops, + Inputs: []operand.Op{ops[0], reg.EAX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + case len(ops) == 2 && operand.IsR32(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "IMULL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsM32(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "IMULL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + } + return nil, errors.New("IMULL: bad operands") +} + +// IMULQ: Signed Multiply. 
+// +// Forms: +// +// IMULQ r64 +// IMULQ m64 +// IMULQ r64 r64 +// IMULQ m64 r64 +func IMULQ(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 1 && operand.IsR64(ops[0]): + return &intrep.Instruction{ + Opcode: "IMULQ", + Operands: ops, + Inputs: []operand.Op{ops[0], reg.RAX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + case len(ops) == 1 && operand.IsM64(ops[0]): + return &intrep.Instruction{ + Opcode: "IMULQ", + Operands: ops, + Inputs: []operand.Op{ops[0], reg.RAX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + case len(ops) == 2 && operand.IsR64(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "IMULQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsM64(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "IMULQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + } + return nil, errors.New("IMULQ: bad operands") +} + +// IMULW: Signed Multiply. +// +// Forms: +// +// IMULW r16 +// IMULW m16 +// IMULW r16 r16 +// IMULW m16 r16 +func IMULW(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 1 && operand.IsR16(ops[0]): + return &intrep.Instruction{ + Opcode: "IMULW", + Operands: ops, + Inputs: []operand.Op{ops[0], reg.AX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + case len(ops) == 1 && operand.IsM16(ops[0]): + return &intrep.Instruction{ + Opcode: "IMULW", + Operands: ops, + Inputs: []operand.Op{ops[0], reg.AX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + case len(ops) == 2 && operand.IsR16(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "IMULW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsM16(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "IMULW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + } + return nil, errors.New("IMULW: bad operands") +} + +// INCB: Increment by 1. +// +// Forms: +// +// INCB r8 +// INCB m8 +func INCB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "INCB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "INCB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("INCB: bad operands") +} + +// INCL: Increment by 1. +// +// Forms: +// +// INCL r32 +// INCL m32 +func INCL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "INCL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "INCL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("INCL: bad operands") +} + +// INCQ: Increment by 1. 
+// +// Forms: +// +// INCQ r64 +// INCQ m64 +func INCQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "INCQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "INCQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("INCQ: bad operands") +} + +// INCW: Increment by 1. +// +// Forms: +// +// INCW r16 +// INCW m16 +func INCW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "INCW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "INCW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("INCW: bad operands") +} + +// INSERTPS: Insert Packed Single Precision Floating-Point Value. +// +// Forms: +// +// INSERTPS imm8 xmm xmm +// INSERTPS imm8 m32 xmm +func INSERTPS(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "INSERTPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "INSERTPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("INSERTPS: bad operands") +} + +// INT: Call to Interrupt Procedure. +// +// Forms: +// +// INT 3 +// INT imm8 +func INT(i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is3(i): + return &intrep.Instruction{ + Opcode: "INT", + Operands: []operand.Op{i}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM8(i): + return &intrep.Instruction{ + Opcode: "INT", + Operands: []operand.Op{i}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("INT: bad operands") +} + +// JA: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JA rel8 +// JA rel32 +func JA(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JA", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JA", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JA: bad operands") +} + +// JAE: Jump if above or equal (CF == 0). 
+// +// Forms: +// +// JAE rel8 +// JAE rel32 +func JAE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JAE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JAE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JAE: bad operands") +} + +// JB: Jump if below (CF == 1). +// +// Forms: +// +// JB rel8 +// JB rel32 +func JB(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JB", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JB", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JB: bad operands") +} + +// JBE: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JBE rel8 +// JBE rel32 +func JBE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JBE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JBE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JBE: bad operands") +} + +// JC: Jump if below (CF == 1). +// +// Forms: +// +// JC rel8 +// JC rel32 +func JC(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JC: bad operands") +} + +// JCC: Jump if above or equal (CF == 0). +// +// Forms: +// +// JCC rel8 +// JCC rel32 +func JCC(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JCC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JCC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JCC: bad operands") +} + +// JCS: Jump if below (CF == 1). 
+// +// Forms: +// +// JCS rel8 +// JCS rel32 +func JCS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JCS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JCS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JCS: bad operands") +} + +// JCXZL: Jump if ECX register is 0. +// +// Forms: +// +// JCXZL rel8 +func JCXZL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JCXZL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{reg.ECX}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JCXZL: bad operands") +} + +// JCXZQ: Jump if RCX register is 0. +// +// Forms: +// +// JCXZQ rel8 +func JCXZQ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JCXZQ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{reg.RCX}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JCXZQ: bad operands") +} + +// JE: Jump if equal (ZF == 1). +// +// Forms: +// +// JE rel8 +// JE rel32 +func JE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JE: bad operands") +} + +// JEQ: Jump if equal (ZF == 1). +// +// Forms: +// +// JEQ rel8 +// JEQ rel32 +func JEQ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JEQ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JEQ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JEQ: bad operands") +} + +// JG: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JG rel8 +// JG rel32 +func JG(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JG", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JG", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JG: bad operands") +} + +// JGE: Jump if greater or equal (SF == OF). 
+// +// Forms: +// +// JGE rel8 +// JGE rel32 +func JGE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JGE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JGE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JGE: bad operands") +} + +// JGT: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JGT rel8 +// JGT rel32 +func JGT(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JGT", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JGT", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JGT: bad operands") +} + +// JHI: Jump if above (CF == 0 and ZF == 0). +// +// Forms: +// +// JHI rel8 +// JHI rel32 +func JHI(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JHI", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JHI", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JHI: bad operands") +} + +// JHS: Jump if above or equal (CF == 0). +// +// Forms: +// +// JHS rel8 +// JHS rel32 +func JHS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JHS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JHS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JHS: bad operands") +} + +// JL: Jump if less (SF != OF). +// +// Forms: +// +// JL rel8 +// JL rel32 +func JL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JL: bad operands") +} + +// JLE: Jump if less or equal (ZF == 1 or SF != OF). 
+// +// Forms: +// +// JLE rel8 +// JLE rel32 +func JLE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JLE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JLE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JLE: bad operands") +} + +// JLO: Jump if below (CF == 1). +// +// Forms: +// +// JLO rel8 +// JLO rel32 +func JLO(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JLO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JLO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JLO: bad operands") +} + +// JLS: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JLS rel8 +// JLS rel32 +func JLS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JLS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JLS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JLS: bad operands") +} + +// JLT: Jump if less (SF != OF). +// +// Forms: +// +// JLT rel8 +// JLT rel32 +func JLT(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JLT", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JLT", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JLT: bad operands") +} + +// JMI: Jump if sign (SF == 1). +// +// Forms: +// +// JMI rel8 +// JMI rel32 +func JMI(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JMI", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JMI", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JMI: bad operands") +} + +// JMP: Jump Unconditionally. 
+// +// Forms: +// +// JMP rel8 +// JMP rel32 +// JMP r64 +// JMP m64 +func JMP(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(mr): + return &intrep.Instruction{ + Opcode: "JMP", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: false, + }, nil + case operand.IsREL32(mr): + return &intrep.Instruction{ + Opcode: "JMP", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: false, + }, nil + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "JMP", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: false, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "JMP", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: false, + }, nil + } + return nil, errors.New("JMP: bad operands") +} + +// JNA: Jump if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// JNA rel8 +// JNA rel32 +func JNA(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNA", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNA", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNA: bad operands") +} + +// JNAE: Jump if below (CF == 1). +// +// Forms: +// +// JNAE rel8 +// JNAE rel32 +func JNAE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNAE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNAE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNAE: bad operands") +} + +// JNB: Jump if above or equal (CF == 0). +// +// Forms: +// +// JNB rel8 +// JNB rel32 +func JNB(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNB", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNB", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNB: bad operands") +} + +// JNBE: Jump if above (CF == 0 and ZF == 0). 
+// +// Forms: +// +// JNBE rel8 +// JNBE rel32 +func JNBE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNBE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNBE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNBE: bad operands") +} + +// JNC: Jump if above or equal (CF == 0). +// +// Forms: +// +// JNC rel8 +// JNC rel32 +func JNC(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNC: bad operands") +} + +// JNE: Jump if not equal (ZF == 0). +// +// Forms: +// +// JNE rel8 +// JNE rel32 +func JNE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNE: bad operands") +} + +// JNG: Jump if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// JNG rel8 +// JNG rel32 +func JNG(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNG", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNG", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNG: bad operands") +} + +// JNGE: Jump if less (SF != OF). +// +// Forms: +// +// JNGE rel8 +// JNGE rel32 +func JNGE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNGE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNGE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNGE: bad operands") +} + +// JNL: Jump if greater or equal (SF == OF). 
+// +// Forms: +// +// JNL rel8 +// JNL rel32 +func JNL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNL: bad operands") +} + +// JNLE: Jump if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// JNLE rel8 +// JNLE rel32 +func JNLE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNLE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNLE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNLE: bad operands") +} + +// JNO: Jump if not overflow (OF == 0). +// +// Forms: +// +// JNO rel8 +// JNO rel32 +func JNO(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNO: bad operands") +} + +// JNP: Jump if not parity (PF == 0). +// +// Forms: +// +// JNP rel8 +// JNP rel32 +func JNP(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNP", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNP", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNP: bad operands") +} + +// JNS: Jump if not sign (SF == 0). +// +// Forms: +// +// JNS rel8 +// JNS rel32 +func JNS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNS: bad operands") +} + +// JNZ: Jump if not equal (ZF == 0). 
+// +// Forms: +// +// JNZ rel8 +// JNZ rel32 +func JNZ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JNZ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JNZ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JNZ: bad operands") +} + +// JO: Jump if overflow (OF == 1). +// +// Forms: +// +// JO rel8 +// JO rel32 +func JO(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JO: bad operands") +} + +// JOC: Jump if not overflow (OF == 0). +// +// Forms: +// +// JOC rel8 +// JOC rel32 +func JOC(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JOC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JOC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JOC: bad operands") +} + +// JOS: Jump if overflow (OF == 1). +// +// Forms: +// +// JOS rel8 +// JOS rel32 +func JOS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JOS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JOS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JOS: bad operands") +} + +// JP: Jump if parity (PF == 1). +// +// Forms: +// +// JP rel8 +// JP rel32 +func JP(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JP", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JP", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JP: bad operands") +} + +// JPC: Jump if not parity (PF == 0). 
+// +// Forms: +// +// JPC rel8 +// JPC rel32 +func JPC(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JPC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JPC", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JPC: bad operands") +} + +// JPE: Jump if parity (PF == 1). +// +// Forms: +// +// JPE rel8 +// JPE rel32 +func JPE(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JPE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JPE", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JPE: bad operands") +} + +// JPL: Jump if not sign (SF == 0). +// +// Forms: +// +// JPL rel8 +// JPL rel32 +func JPL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JPL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JPL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JPL: bad operands") +} + +// JPO: Jump if not parity (PF == 0). +// +// Forms: +// +// JPO rel8 +// JPO rel32 +func JPO(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JPO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JPO", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JPO: bad operands") +} + +// JPS: Jump if parity (PF == 1). +// +// Forms: +// +// JPS rel8 +// JPS rel32 +func JPS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JPS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JPS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JPS: bad operands") +} + +// JS: Jump if sign (SF == 1). 
+// +// Forms: +// +// JS rel8 +// JS rel32 +func JS(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JS", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JS: bad operands") +} + +// JZ: Jump if equal (ZF == 1). +// +// Forms: +// +// JZ rel8 +// JZ rel32 +func JZ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsREL8(r): + return &intrep.Instruction{ + Opcode: "JZ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + case operand.IsREL32(r): + return &intrep.Instruction{ + Opcode: "JZ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + IsBranch: true, + IsConditional: true, + }, nil + } + return nil, errors.New("JZ: bad operands") +} + +// LDDQU: Load Unaligned Integer 128 Bits. +// +// Forms: +// +// LDDQU m128 xmm +func LDDQU(m, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "LDDQU", + Operands: []operand.Op{m, x}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("LDDQU: bad operands") +} + +// LDMXCSR: Load MXCSR Register. +// +// Forms: +// +// LDMXCSR m32 +func LDMXCSR(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM32(m): + return &intrep.Instruction{ + Opcode: "LDMXCSR", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("LDMXCSR: bad operands") +} + +// LEAL: Load Effective Address. +// +// Forms: +// +// LEAL m r32 +func LEAL(m, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM(m) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "LEAL", + Operands: []operand.Op{m, r}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("LEAL: bad operands") +} + +// LEAQ: Load Effective Address. +// +// Forms: +// +// LEAQ m r64 +func LEAQ(m, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM(m) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "LEAQ", + Operands: []operand.Op{m, r}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("LEAQ: bad operands") +} + +// LEAW: Load Effective Address. +// +// Forms: +// +// LEAW m r16 +func LEAW(m, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM(m) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "LEAW", + Operands: []operand.Op{m, r}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("LEAW: bad operands") +} + +// LFENCE: Load Fence. +// +// Forms: +// +// LFENCE +func LFENCE() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "LFENCE", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// LZCNTL: Count the Number of Leading Zero Bits. 
+// +// Forms: +// +// LZCNTL r32 r32 +// LZCNTL m32 r32 +func LZCNTL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "LZCNTL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "LZCNTL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("LZCNTL: bad operands") +} + +// LZCNTQ: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTQ r64 r64 +// LZCNTQ m64 r64 +func LZCNTQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "LZCNTQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "LZCNTQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("LZCNTQ: bad operands") +} + +// LZCNTW: Count the Number of Leading Zero Bits. +// +// Forms: +// +// LZCNTW r16 r16 +// LZCNTW m16 r16 +func LZCNTW(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "LZCNTW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "LZCNTW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("LZCNTW: bad operands") +} + +// MASKMOVDQU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// MASKMOVDQU xmm xmm +func MASKMOVDQU(x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "MASKMOVDQU", + Operands: []operand.Op{x, x1}, + Inputs: []operand.Op{x, x1, reg.RDI}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("MASKMOVDQU: bad operands") +} + +// MASKMOVOU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// MASKMOVOU xmm xmm +func MASKMOVOU(x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "MASKMOVOU", + Operands: []operand.Op{x, x1}, + Inputs: []operand.Op{x, x1, reg.RDI}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("MASKMOVOU: bad operands") +} + +// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MAXPD xmm xmm +// MAXPD m128 xmm +func MAXPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MAXPD: bad operands") +} + +// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// MAXPS xmm xmm +// MAXPS m128 xmm +func MAXPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MAXPS: bad operands") +} + +// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MAXSD xmm xmm +// MAXSD m64 xmm +func MAXSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MAXSD: bad operands") +} + +// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// MAXSS xmm xmm +// MAXSS m32 xmm +func MAXSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MAXSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MAXSS: bad operands") +} + +// MFENCE: Memory Fence. +// +// Forms: +// +// MFENCE +func MFENCE() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "MFENCE", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// MINPD: Return Minimum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MINPD xmm xmm +// MINPD m128 xmm +func MINPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MINPD: bad operands") +} + +// MINPS: Return Minimum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MINPS xmm xmm +// MINPS m128 xmm +func MINPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MINPS: bad operands") +} + +// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value. 
+// +// Forms: +// +// MINSD xmm xmm +// MINSD m64 xmm +func MINSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MINSD: bad operands") +} + +// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// MINSS xmm xmm +// MINSS m32 xmm +func MINSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MINSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MINSS: bad operands") +} + +// MONITOR: Monitor a Linear Address Range. +// +// Forms: +// +// MONITOR +func MONITOR() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "MONITOR", + Operands: nil, + Inputs: []operand.Op{reg.RAX, reg.ECX, reg.EDX}, + Outputs: []operand.Op{}, + }, nil +} + +// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MOVAPD xmm xmm +// MOVAPD m128 xmm +// MOVAPD xmm m128 +func MOVAPD(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVAPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVAPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVAPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVAPD: bad operands") +} + +// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVAPS xmm xmm +// MOVAPS m128 xmm +// MOVAPS xmm m128 +func MOVAPS(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVAPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVAPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVAPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVAPS: bad operands") +} + +// MOVB: Move. 
+// +// Forms: +// +// MOVB imm8 r8 +// MOVB r8 r8 +// MOVB m8 r8 +// MOVB imm8 m8 +// MOVB r8 m8 +func MOVB(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "MOVB", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "MOVB", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "MOVB", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "MOVB", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "MOVB", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("MOVB: bad operands") +} + +// MOVBELL: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBELL m32 r32 +// MOVBELL r32 m32 +func MOVBELL(mr, mr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM32(mr) && operand.IsR32(mr1): + return &intrep.Instruction{ + Opcode: "MOVBELL", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr1}, + }, nil + case operand.IsR32(mr) && operand.IsM32(mr1): + return &intrep.Instruction{ + Opcode: "MOVBELL", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr1}, + }, nil + } + return nil, errors.New("MOVBELL: bad operands") +} + +// MOVBEQQ: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBEQQ m64 r64 +// MOVBEQQ r64 m64 +func MOVBEQQ(mr, mr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM64(mr) && operand.IsR64(mr1): + return &intrep.Instruction{ + Opcode: "MOVBEQQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr1}, + }, nil + case operand.IsR64(mr) && operand.IsM64(mr1): + return &intrep.Instruction{ + Opcode: "MOVBEQQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr1}, + }, nil + } + return nil, errors.New("MOVBEQQ: bad operands") +} + +// MOVBEWW: Move Data After Swapping Bytes. +// +// Forms: +// +// MOVBEWW m16 r16 +// MOVBEWW r16 m16 +func MOVBEWW(mr, mr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM16(mr) && operand.IsR16(mr1): + return &intrep.Instruction{ + Opcode: "MOVBEWW", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr1}, + }, nil + case operand.IsR16(mr) && operand.IsM16(mr1): + return &intrep.Instruction{ + Opcode: "MOVBEWW", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr1}, + }, nil + } + return nil, errors.New("MOVBEWW: bad operands") +} + +// MOVBLSX: Move with Sign-Extension. 
+// +// Forms: +// +// MOVBLSX r8 r32 +// MOVBLSX m8 r32 +func MOVBLSX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVBLSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVBLSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVBLSX: bad operands") +} + +// MOVBLZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBLZX r8 r32 +// MOVBLZX m8 r32 +func MOVBLZX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVBLZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVBLZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVBLZX: bad operands") +} + +// MOVBQSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBQSX r8 r64 +// MOVBQSX m8 r64 +func MOVBQSX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVBQSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVBQSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVBQSX: bad operands") +} + +// MOVBQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVBQZX r8 r64 +// MOVBQZX m8 r64 +func MOVBQZX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVBQZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVBQZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVBQZX: bad operands") +} + +// MOVBWSX: Move with Sign-Extension. +// +// Forms: +// +// MOVBWSX r8 r16 +// MOVBWSX m8 r16 +func MOVBWSX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "MOVBWSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "MOVBWSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVBWSX: bad operands") +} + +// MOVBWZX: Move with Zero-Extend. 
+// +// Forms: +// +// MOVBWZX r8 r16 +// MOVBWZX m8 r16 +func MOVBWZX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "MOVBWZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM8(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "MOVBWZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVBWZX: bad operands") +} + +// MOVD: Move. +// +// Forms: +// +// MOVD imm32 r64 +// MOVD imm64 r64 +// MOVD r64 r64 +// MOVD m64 r64 +// MOVD imm32 m64 +// MOVD r64 m64 +// MOVD xmm r64 +// MOVD r64 xmm +// MOVD xmm xmm +// MOVD m64 xmm +// MOVD xmm m64 +// MOVD xmm r32 +// MOVD r32 xmm +// MOVD m32 xmm +// MOVD xmm m32 +func MOVD(imrx, mrx operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsIMM64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsIMM32(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM64(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsR32(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR32(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: 
[]operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM32(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsM32(mrx): + return &intrep.Instruction{ + Opcode: "MOVD", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + } + return nil, errors.New("MOVD: bad operands") +} + +// MOVDDUP: Move One Double-FP and Duplicate. +// +// Forms: +// +// MOVDDUP xmm xmm +// MOVDDUP m64 xmm +func MOVDDUP(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVDDUP", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVDDUP", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MOVDDUP: bad operands") +} + +// MOVDQ2Q: Move. +// +// Forms: +// +// MOVDQ2Q imm32 r64 +// MOVDQ2Q imm64 r64 +// MOVDQ2Q r64 r64 +// MOVDQ2Q m64 r64 +// MOVDQ2Q imm32 m64 +// MOVDQ2Q r64 m64 +// MOVDQ2Q xmm r64 +// MOVDQ2Q r64 xmm +// MOVDQ2Q xmm xmm +// MOVDQ2Q m64 xmm +// MOVDQ2Q xmm m64 +// MOVDQ2Q xmm r32 +// MOVDQ2Q r32 xmm +// MOVDQ2Q m32 xmm +// MOVDQ2Q xmm m32 +func MOVDQ2Q(imrx, mrx operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsIMM64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsIMM32(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM64(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: 
[]operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsR32(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR32(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM32(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsM32(mrx): + return &intrep.Instruction{ + Opcode: "MOVDQ2Q", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + } + return nil, errors.New("MOVDQ2Q: bad operands") +} + +// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low. +// +// Forms: +// +// MOVHLPS xmm xmm +func MOVHLPS(x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "MOVHLPS", + Operands: []operand.Op{x, x1}, + Inputs: []operand.Op{x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("MOVHLPS: bad operands") +} + +// MOVHPD: Move High Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVHPD m64 xmm +// MOVHPD xmm m64 +func MOVHPD(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM64(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVHPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx, mx1}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM64(mx1): + return &intrep.Instruction{ + Opcode: "MOVHPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVHPD: bad operands") +} + +// MOVHPS: Move High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVHPS m64 xmm +// MOVHPS xmm m64 +func MOVHPS(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM64(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVHPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx, mx1}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM64(mx1): + return &intrep.Instruction{ + Opcode: "MOVHPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVHPS: bad operands") +} + +// MOVL: Move. 
+// +// Forms: +// +// MOVL imm32 r32 +// MOVL r32 r32 +// MOVL m32 r32 +// MOVL imm32 m32 +// MOVL r32 m32 +func MOVL(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "MOVL", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "MOVL", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "MOVL", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "MOVL", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "MOVL", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("MOVL: bad operands") +} + +// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High. +// +// Forms: +// +// MOVLHPS xmm xmm +func MOVLHPS(x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "MOVLHPS", + Operands: []operand.Op{x, x1}, + Inputs: []operand.Op{x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("MOVLHPS: bad operands") +} + +// MOVLPD: Move Low Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVLPD m64 xmm +// MOVLPD xmm m64 +func MOVLPD(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM64(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVLPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx, mx1}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM64(mx1): + return &intrep.Instruction{ + Opcode: "MOVLPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVLPD: bad operands") +} + +// MOVLPS: Move Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVLPS m64 xmm +// MOVLPS xmm m64 +func MOVLPS(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM64(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVLPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx, mx1}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM64(mx1): + return &intrep.Instruction{ + Opcode: "MOVLPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVLPS: bad operands") +} + +// MOVLQSX: Move Doubleword to Quadword with Sign-Extension. 
+// +// Forms: +// +// MOVLQSX r32 r64 +// MOVLQSX m32 r64 +func MOVLQSX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVLQSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVLQSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVLQSX: bad operands") +} + +// MOVLQZX: Move with Zero-Extend. +// +// Forms: +// +// MOVLQZX m32 r64 +func MOVLQZX(m, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM32(m) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVLQZX", + Operands: []operand.Op{m, r}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVLQZX: bad operands") +} + +// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask. +// +// Forms: +// +// MOVMSKPD xmm r32 +func MOVMSKPD(x, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVMSKPD", + Operands: []operand.Op{x, r}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVMSKPD: bad operands") +} + +// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask. +// +// Forms: +// +// MOVMSKPS xmm r32 +func MOVMSKPS(x, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVMSKPS", + Operands: []operand.Op{x, r}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVMSKPS: bad operands") +} + +// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTDQ xmm m128 +func MOVNTDQ(x, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "MOVNTDQ", + Operands: []operand.Op{x, m}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("MOVNTDQ: bad operands") +} + +// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint. +// +// Forms: +// +// MOVNTDQA m128 xmm +func MOVNTDQA(m, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVNTDQA", + Operands: []operand.Op{m, x}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MOVNTDQA: bad operands") +} + +// MOVNTIL: Store Doubleword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTIL r32 m32 +func MOVNTIL(r, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsM32(m): + return &intrep.Instruction{ + Opcode: "MOVNTIL", + Operands: []operand.Op{r, m}, + Inputs: []operand.Op{r}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("MOVNTIL: bad operands") +} + +// MOVNTIQ: Store Doubleword Using Non-Temporal Hint. 
+// +// Forms: +// +// MOVNTIQ r64 m64 +func MOVNTIQ(r, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsM64(m): + return &intrep.Instruction{ + Opcode: "MOVNTIQ", + Operands: []operand.Op{r, m}, + Inputs: []operand.Op{r}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("MOVNTIQ: bad operands") +} + +// MOVNTO: Store Double Quadword Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTO xmm m128 +func MOVNTO(x, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "MOVNTO", + Operands: []operand.Op{x, m}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("MOVNTO: bad operands") +} + +// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTPD xmm m128 +func MOVNTPD(x, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "MOVNTPD", + Operands: []operand.Op{x, m}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("MOVNTPD: bad operands") +} + +// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// MOVNTPS xmm m128 +func MOVNTPS(x, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "MOVNTPS", + Operands: []operand.Op{x, m}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("MOVNTPS: bad operands") +} + +// MOVO: Move Aligned Double Quadword. +// +// Forms: +// +// MOVO xmm xmm +// MOVO m128 xmm +// MOVO xmm m128 +func MOVO(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVO", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVO", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVO", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVO: bad operands") +} + +// MOVOA: Move Aligned Double Quadword. +// +// Forms: +// +// MOVOA xmm xmm +// MOVOA m128 xmm +// MOVOA xmm m128 +func MOVOA(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVOA", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVOA", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVOA", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVOA: bad operands") +} + +// MOVOU: Move Unaligned Double Quadword. 
+// +// Forms: +// +// MOVOU xmm xmm +// MOVOU m128 xmm +// MOVOU xmm m128 +func MOVOU(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVOU", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVOU", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVOU", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVOU: bad operands") +} + +// MOVQ: Move. +// +// Forms: +// +// MOVQ imm32 r64 +// MOVQ imm64 r64 +// MOVQ r64 r64 +// MOVQ m64 r64 +// MOVQ imm32 m64 +// MOVQ r64 m64 +// MOVQ xmm r64 +// MOVQ r64 xmm +// MOVQ xmm xmm +// MOVQ m64 xmm +// MOVQ xmm m64 +// MOVQ xmm r32 +// MOVQ r32 xmm +// MOVQ m32 xmm +// MOVQ xmm m32 +func MOVQ(imrx, mrx operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsIMM64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM64(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsIMM32(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsR64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR64(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM64(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsM64(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsR32(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + 
Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsR32(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsM32(imrx) && operand.IsXMM(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + case operand.IsXMM(imrx) && operand.IsM32(mrx): + return &intrep.Instruction{ + Opcode: "MOVQ", + Operands: []operand.Op{imrx, mrx}, + Inputs: []operand.Op{imrx}, + Outputs: []operand.Op{mrx}, + }, nil + } + return nil, errors.New("MOVQ: bad operands") +} + +// MOVSD: Move Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// MOVSD xmm xmm +// MOVSD m64 xmm +// MOVSD xmm m64 +func MOVSD(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVSD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx, mx1}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVSD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM64(mx1): + return &intrep.Instruction{ + Opcode: "MOVSD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVSD: bad operands") +} + +// MOVSHDUP: Move Packed Single-FP High and Duplicate. +// +// Forms: +// +// MOVSHDUP xmm xmm +// MOVSHDUP m128 xmm +func MOVSHDUP(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVSHDUP", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVSHDUP", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MOVSHDUP: bad operands") +} + +// MOVSLDUP: Move Packed Single-FP Low and Duplicate. +// +// Forms: +// +// MOVSLDUP xmm xmm +// MOVSLDUP m128 xmm +func MOVSLDUP(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVSLDUP", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MOVSLDUP", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MOVSLDUP: bad operands") +} + +// MOVSS: Move Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// MOVSS xmm xmm +// MOVSS m32 xmm +// MOVSS xmm m32 +func MOVSS(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVSS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx, mx1}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVSS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM32(mx1): + return &intrep.Instruction{ + Opcode: "MOVSS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVSS: bad operands") +} + +// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MOVUPD xmm xmm +// MOVUPD m128 xmm +// MOVUPD xmm m128 +func MOVUPD(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVUPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVUPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVUPD", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVUPD: bad operands") +} + +// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// MOVUPS xmm xmm +// MOVUPS m128 xmm +// MOVUPS xmm m128 +func MOVUPS(mx, mx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVUPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(mx1): + return &intrep.Instruction{ + Opcode: "MOVUPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + case operand.IsXMM(mx) && operand.IsM128(mx1): + return &intrep.Instruction{ + Opcode: "MOVUPS", + Operands: []operand.Op{mx, mx1}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{mx1}, + }, nil + } + return nil, errors.New("MOVUPS: bad operands") +} + +// MOVW: Move. 
+// +// Forms: +// +// MOVW imm16 r16 +// MOVW r16 r16 +// MOVW m16 r16 +// MOVW imm16 m16 +// MOVW r16 m16 +func MOVW(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "MOVW", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "MOVW", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "MOVW", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "MOVW", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "MOVW", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("MOVW: bad operands") +} + +// MOVWLSX: Move with Sign-Extension. +// +// Forms: +// +// MOVWLSX r16 r32 +// MOVWLSX m16 r32 +func MOVWLSX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVWLSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVWLSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVWLSX: bad operands") +} + +// MOVWLZX: Move with Zero-Extend. +// +// Forms: +// +// MOVWLZX r16 r32 +// MOVWLZX m16 r32 +func MOVWLZX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVWLZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "MOVWLZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVWLZX: bad operands") +} + +// MOVWQSX: Move with Sign-Extension. +// +// Forms: +// +// MOVWQSX r16 r64 +// MOVWQSX m16 r64 +func MOVWQSX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVWQSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVWQSX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVWQSX: bad operands") +} + +// MOVWQZX: Move with Zero-Extend. 
+// +// Forms: +// +// MOVWQZX r16 r64 +// MOVWQZX m16 r64 +func MOVWQZX(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVWQZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "MOVWQZX", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("MOVWQZX: bad operands") +} + +// MPSADBW: Compute Multiple Packed Sums of Absolute Difference. +// +// Forms: +// +// MPSADBW imm8 xmm xmm +// MPSADBW imm8 m128 xmm +func MPSADBW(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MPSADBW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MPSADBW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MPSADBW: bad operands") +} + +// MULB: Unsigned Multiply. +// +// Forms: +// +// MULB r8 +// MULB m8 +func MULB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "MULB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AL}, + Outputs: []operand.Op{reg.AX}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "MULB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AL}, + Outputs: []operand.Op{reg.AX}, + }, nil + } + return nil, errors.New("MULB: bad operands") +} + +// MULL: Unsigned Multiply. +// +// Forms: +// +// MULL r32 +// MULL m32 +func MULL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "MULL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.EAX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "MULL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.EAX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil + } + return nil, errors.New("MULL: bad operands") +} + +// MULPD: Multiply Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// MULPD xmm xmm +// MULPD m128 xmm +func MULPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MULPD: bad operands") +} + +// MULPS: Multiply Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// MULPS xmm xmm +// MULPS m128 xmm +func MULPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MULPS: bad operands") +} + +// MULQ: Unsigned Multiply. +// +// Forms: +// +// MULQ r64 +// MULQ m64 +func MULQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "MULQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.RAX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "MULQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.RAX}, + Outputs: []operand.Op{reg.RAX, reg.RDX}, + }, nil + } + return nil, errors.New("MULQ: bad operands") +} + +// MULSD: Multiply Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// MULSD xmm xmm +// MULSD m64 xmm +func MULSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MULSD: bad operands") +} + +// MULSS: Multiply Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// MULSS xmm xmm +// MULSS m32 xmm +func MULSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "MULSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("MULSS: bad operands") +} + +// MULW: Unsigned Multiply. +// +// Forms: +// +// MULW r16 +// MULW m16 +func MULW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "MULW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "MULW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr, reg.AX}, + Outputs: []operand.Op{reg.AX, reg.DX}, + }, nil + } + return nil, errors.New("MULW: bad operands") +} + +// MULXL: Unsigned Multiply Without Affecting Flags. 
+// +// Forms: +// +// MULXL r32 r32 r32 +// MULXL m32 r32 r32 +func MULXL(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "MULXL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, reg.EDX}, + Outputs: []operand.Op{r, r1}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "MULXL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, reg.EDX}, + Outputs: []operand.Op{r, r1}, + }, nil + } + return nil, errors.New("MULXL: bad operands") +} + +// MULXQ: Unsigned Multiply Without Affecting Flags. +// +// Forms: +// +// MULXQ r64 r64 r64 +// MULXQ m64 r64 r64 +func MULXQ(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "MULXQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, reg.RDX}, + Outputs: []operand.Op{r, r1}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "MULXQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, reg.RDX}, + Outputs: []operand.Op{r, r1}, + }, nil + } + return nil, errors.New("MULXQ: bad operands") +} + +// MWAIT: Monitor Wait. +// +// Forms: +// +// MWAIT +func MWAIT() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "MWAIT", + Operands: nil, + Inputs: []operand.Op{reg.EAX, reg.ECX}, + Outputs: []operand.Op{}, + }, nil +} + +// NEGB: Two's Complement Negation. +// +// Forms: +// +// NEGB r8 +// NEGB m8 +func NEGB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "NEGB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "NEGB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NEGB: bad operands") +} + +// NEGL: Two's Complement Negation. +// +// Forms: +// +// NEGL r32 +// NEGL m32 +func NEGL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "NEGL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "NEGL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NEGL: bad operands") +} + +// NEGQ: Two's Complement Negation. +// +// Forms: +// +// NEGQ r64 +// NEGQ m64 +func NEGQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "NEGQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "NEGQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NEGQ: bad operands") +} + +// NEGW: Two's Complement Negation. 
+// +// Forms: +// +// NEGW r16 +// NEGW m16 +func NEGW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "NEGW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "NEGW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NEGW: bad operands") +} + +// NOP: No Operation. +// +// Forms: +// +// NOP +func NOP() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "NOP", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// NOTB: One's Complement Negation. +// +// Forms: +// +// NOTB r8 +// NOTB m8 +func NOTB(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "NOTB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "NOTB", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NOTB: bad operands") +} + +// NOTL: One's Complement Negation. +// +// Forms: +// +// NOTL r32 +// NOTL m32 +func NOTL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "NOTL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "NOTL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NOTL: bad operands") +} + +// NOTQ: One's Complement Negation. +// +// Forms: +// +// NOTQ r64 +// NOTQ m64 +func NOTQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "NOTQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "NOTQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NOTQ: bad operands") +} + +// NOTW: One's Complement Negation. +// +// Forms: +// +// NOTW r16 +// NOTW m16 +func NOTW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "NOTW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "NOTW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("NOTW: bad operands") +} + +// ORB: Logical Inclusive OR. 
+// +// Forms: +// +// ORB imm8 al +// ORB imm8 r8 +// ORB r8 r8 +// ORB m8 r8 +// ORB imm8 m8 +// ORB r8 m8 +func ORB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "ORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "ORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "ORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ORB: bad operands") +} + +// ORL: Logical Inclusive OR. +// +// Forms: +// +// ORL imm32 eax +// ORL imm8 r32 +// ORL imm32 r32 +// ORL r32 r32 +// ORL m32 r32 +// ORL imm8 m32 +// ORL imm32 m32 +// ORL r32 m32 +func ORL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "ORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("ORL: bad operands") +} + +// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values. 
+// +// Forms: +// +// ORPD xmm xmm +// ORPD m128 xmm +func ORPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ORPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ORPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ORPD: bad operands") +} + +// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values. +// +// Forms: +// +// ORPS xmm xmm +// ORPS m128 xmm +func ORPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ORPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ORPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ORPS: bad operands") +} + +// ORQ: Logical Inclusive OR. +// +// Forms: +// +// ORQ imm32 rax +// ORQ imm8 r64 +// ORQ imm32 r64 +// ORQ r64 r64 +// ORQ m64 r64 +// ORQ imm8 m64 +// ORQ imm32 m64 +// ORQ r64 m64 +func ORQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ORQ: bad operands") +} + +// ORW: Logical Inclusive OR. 
+// +// Forms: +// +// ORW imm16 ax +// ORW imm8 r16 +// ORW imm16 r16 +// ORW r16 r16 +// ORW m16 r16 +// ORW imm8 m16 +// ORW imm16 m16 +// ORW r16 m16 +func ORW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "ORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("ORW: bad operands") +} + +// PABSB: Packed Absolute Value of Byte Integers. +// +// Forms: +// +// PABSB xmm xmm +// PABSB m128 xmm +func PABSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PABSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PABSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PABSB: bad operands") +} + +// PABSD: Packed Absolute Value of Doubleword Integers. +// +// Forms: +// +// PABSD xmm xmm +// PABSD m128 xmm +func PABSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PABSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PABSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PABSD: bad operands") +} + +// PABSW: Packed Absolute Value of Word Integers. 
+// +// Forms: +// +// PABSW xmm xmm +// PABSW m128 xmm +func PABSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PABSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PABSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PABSW: bad operands") +} + +// PACKSSLW: Pack Doublewords into Words with Signed Saturation. +// +// Forms: +// +// PACKSSLW xmm xmm +// PACKSSLW m128 xmm +func PACKSSLW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKSSLW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKSSLW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PACKSSLW: bad operands") +} + +// PACKSSWB: Pack Words into Bytes with Signed Saturation. +// +// Forms: +// +// PACKSSWB xmm xmm +// PACKSSWB m128 xmm +func PACKSSWB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKSSWB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKSSWB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PACKSSWB: bad operands") +} + +// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation. +// +// Forms: +// +// PACKUSDW xmm xmm +// PACKUSDW m128 xmm +func PACKUSDW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKUSDW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKUSDW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PACKUSDW: bad operands") +} + +// PACKUSWB: Pack Words into Bytes with Unsigned Saturation. +// +// Forms: +// +// PACKUSWB xmm xmm +// PACKUSWB m128 xmm +func PACKUSWB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKUSWB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PACKUSWB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PACKUSWB: bad operands") +} + +// PADDB: Add Packed Byte Integers. 
+// +// Forms: +// +// PADDB xmm xmm +// PADDB m128 xmm +func PADDB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDB: bad operands") +} + +// PADDD: Add Packed Doubleword Integers. +// +// Forms: +// +// PADDD xmm xmm +// PADDD m128 xmm +func PADDD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDD: bad operands") +} + +// PADDL: Add Packed Doubleword Integers. +// +// Forms: +// +// PADDL xmm xmm +// PADDL m128 xmm +func PADDL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDL: bad operands") +} + +// PADDQ: Add Packed Quadword Integers. +// +// Forms: +// +// PADDQ xmm xmm +// PADDQ m128 xmm +func PADDQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDQ: bad operands") +} + +// PADDSB: Add Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// PADDSB xmm xmm +// PADDSB m128 xmm +func PADDSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDSB: bad operands") +} + +// PADDSW: Add Packed Signed Word Integers with Signed Saturation. 
+// +// Forms: +// +// PADDSW xmm xmm +// PADDSW m128 xmm +func PADDSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDSW: bad operands") +} + +// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// PADDUSB xmm xmm +// PADDUSB m128 xmm +func PADDUSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDUSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDUSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDUSB: bad operands") +} + +// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// PADDUSW xmm xmm +// PADDUSW m128 xmm +func PADDUSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDUSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDUSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDUSW: bad operands") +} + +// PADDW: Add Packed Word Integers. +// +// Forms: +// +// PADDW xmm xmm +// PADDW m128 xmm +func PADDW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PADDW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PADDW: bad operands") +} + +// PALIGNR: Packed Align Right. +// +// Forms: +// +// PALIGNR imm8 xmm xmm +// PALIGNR imm8 m128 xmm +func PALIGNR(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PALIGNR", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PALIGNR", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PALIGNR: bad operands") +} + +// PAND: Packed Bitwise Logical AND. 
+// +// Forms: +// +// PAND xmm xmm +// PAND m128 xmm +func PAND(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PAND", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PAND", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PAND: bad operands") +} + +// PANDN: Packed Bitwise Logical AND NOT. +// +// Forms: +// +// PANDN xmm xmm +// PANDN m128 xmm +func PANDN(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PANDN", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PANDN", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PANDN: bad operands") +} + +// PAUSE: Spin Loop Hint. +// +// Forms: +// +// PAUSE +func PAUSE() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "PAUSE", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// PAVGB: Average Packed Byte Integers. +// +// Forms: +// +// PAVGB xmm xmm +// PAVGB m128 xmm +func PAVGB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PAVGB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PAVGB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PAVGB: bad operands") +} + +// PAVGW: Average Packed Word Integers. +// +// Forms: +// +// PAVGW xmm xmm +// PAVGW m128 xmm +func PAVGW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PAVGW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PAVGW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PAVGW: bad operands") +} + +// PBLENDVB: Variable Blend Packed Bytes. +// +// Forms: +// +// PBLENDVB xmm0 xmm xmm +// PBLENDVB xmm0 m128 xmm +func PBLENDVB(x, mx, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM0(x) && operand.IsXMM(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "PBLENDVB", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsXMM0(x) && operand.IsM128(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "PBLENDVB", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("PBLENDVB: bad operands") +} + +// PBLENDW: Blend Packed Words. 
+// +// Forms: +// +// PBLENDW imm8 xmm xmm +// PBLENDW imm8 m128 xmm +func PBLENDW(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PBLENDW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PBLENDW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PBLENDW: bad operands") +} + +// PCLMULQDQ: Carry-Less Quadword Multiplication. +// +// Forms: +// +// PCLMULQDQ imm8 xmm xmm +// PCLMULQDQ imm8 m128 xmm +func PCLMULQDQ(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCLMULQDQ", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCLMULQDQ", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCLMULQDQ: bad operands") +} + +// PCMPEQB: Compare Packed Byte Data for Equality. +// +// Forms: +// +// PCMPEQB xmm xmm +// PCMPEQB m128 xmm +func PCMPEQB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPEQB: bad operands") +} + +// PCMPEQL: Compare Packed Doubleword Data for Equality. +// +// Forms: +// +// PCMPEQL xmm xmm +// PCMPEQL m128 xmm +func PCMPEQL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPEQL: bad operands") +} + +// PCMPEQQ: Compare Packed Quadword Data for Equality. +// +// Forms: +// +// PCMPEQQ xmm xmm +// PCMPEQQ m128 xmm +func PCMPEQQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPEQQ: bad operands") +} + +// PCMPEQW: Compare Packed Word Data for Equality. 
+// +// Forms: +// +// PCMPEQW xmm xmm +// PCMPEQW m128 xmm +func PCMPEQW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPEQW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPEQW: bad operands") +} + +// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index. +// +// Forms: +// +// PCMPESTRI imm8 xmm xmm +// PCMPESTRI imm8 m128 xmm +func PCMPESTRI(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPESTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.ECX}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPESTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.ECX}, + }, nil + } + return nil, errors.New("PCMPESTRI: bad operands") +} + +// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask. +// +// Forms: +// +// PCMPESTRM imm8 xmm xmm +// PCMPESTRM imm8 m128 xmm +func PCMPESTRM(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPESTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.X0}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPESTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.X0}, + }, nil + } + return nil, errors.New("PCMPESTRM: bad operands") +} + +// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than. +// +// Forms: +// +// PCMPGTB xmm xmm +// PCMPGTB m128 xmm +func PCMPGTB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPGTB: bad operands") +} + +// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than. +// +// Forms: +// +// PCMPGTL xmm xmm +// PCMPGTL m128 xmm +func PCMPGTL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPGTL: bad operands") +} + +// PCMPGTQ: Compare Packed Data for Greater Than. 
+// +// Forms: +// +// PCMPGTQ xmm xmm +// PCMPGTQ m128 xmm +func PCMPGTQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPGTQ: bad operands") +} + +// PCMPGTW: Compare Packed Signed Word Integers for Greater Than. +// +// Forms: +// +// PCMPGTW xmm xmm +// PCMPGTW m128 xmm +func PCMPGTW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPGTW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PCMPGTW: bad operands") +} + +// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index. +// +// Forms: +// +// PCMPISTRI imm8 xmm xmm +// PCMPISTRI imm8 m128 xmm +func PCMPISTRI(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPISTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.ECX}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPISTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.ECX}, + }, nil + } + return nil, errors.New("PCMPISTRI: bad operands") +} + +// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask. +// +// Forms: +// +// PCMPISTRM imm8 xmm xmm +// PCMPISTRM imm8 m128 xmm +func PCMPISTRM(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPISTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.X0}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PCMPISTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.X0}, + }, nil + } + return nil, errors.New("PCMPISTRM: bad operands") +} + +// PDEPL: Parallel Bits Deposit. +// +// Forms: +// +// PDEPL r32 r32 r32 +// PDEPL m32 r32 r32 +func PDEPL(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "PDEPL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "PDEPL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("PDEPL: bad operands") +} + +// PDEPQ: Parallel Bits Deposit. 
+// +// Forms: +// +// PDEPQ r64 r64 r64 +// PDEPQ m64 r64 r64 +func PDEPQ(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "PDEPQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "PDEPQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("PDEPQ: bad operands") +} + +// PEXTL: Parallel Bits Extract. +// +// Forms: +// +// PEXTL r32 r32 r32 +// PEXTL m32 r32 r32 +func PEXTL(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "PEXTL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "PEXTL", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("PEXTL: bad operands") +} + +// PEXTQ: Parallel Bits Extract. +// +// Forms: +// +// PEXTQ r64 r64 r64 +// PEXTQ m64 r64 r64 +func PEXTQ(mr, r, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "PEXTQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "PEXTQ", + Operands: []operand.Op{mr, r, r1}, + Inputs: []operand.Op{mr, r}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("PEXTQ: bad operands") +} + +// PEXTRB: Extract Byte. +// +// Forms: +// +// PEXTRB imm8 xmm r32 +// PEXTRB imm8 xmm m8 +func PEXTRB(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "PEXTRB", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "PEXTRB", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("PEXTRB: bad operands") +} + +// PEXTRD: Extract Doubleword. +// +// Forms: +// +// PEXTRD imm8 xmm r32 +// PEXTRD imm8 xmm m32 +func PEXTRD(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "PEXTRD", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "PEXTRD", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("PEXTRD: bad operands") +} + +// PEXTRQ: Extract Quadword. 
+// +// Forms: +// +// PEXTRQ imm8 xmm r64 +// PEXTRQ imm8 xmm m64 +func PEXTRQ(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "PEXTRQ", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "PEXTRQ", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("PEXTRQ: bad operands") +} + +// PEXTRW: Extract Word. +// +// Forms: +// +// PEXTRW imm8 xmm r32 +// PEXTRW imm8 xmm m16 +func PEXTRW(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "PEXTRW", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "PEXTRW", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("PEXTRW: bad operands") +} + +// PHADDD: Packed Horizontal Add Doubleword Integer. +// +// Forms: +// +// PHADDD xmm xmm +// PHADDD m128 xmm +func PHADDD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHADDD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHADDD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHADDD: bad operands") +} + +// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PHADDSW xmm xmm +// PHADDSW m128 xmm +func PHADDSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHADDSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHADDSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHADDSW: bad operands") +} + +// PHADDW: Packed Horizontal Add Word Integers. +// +// Forms: +// +// PHADDW xmm xmm +// PHADDW m128 xmm +func PHADDW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHADDW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHADDW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHADDW: bad operands") +} + +// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers. 
+// +// Forms: +// +// PHMINPOSUW xmm xmm +// PHMINPOSUW m128 xmm +func PHMINPOSUW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHMINPOSUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHMINPOSUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHMINPOSUW: bad operands") +} + +// PHSUBD: Packed Horizontal Subtract Doubleword Integers. +// +// Forms: +// +// PHSUBD xmm xmm +// PHSUBD m128 xmm +func PHSUBD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHSUBD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHSUBD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHSUBD: bad operands") +} + +// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PHSUBSW xmm xmm +// PHSUBSW m128 xmm +func PHSUBSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHSUBSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHSUBSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHSUBSW: bad operands") +} + +// PHSUBW: Packed Horizontal Subtract Word Integers. +// +// Forms: +// +// PHSUBW xmm xmm +// PHSUBW m128 xmm +func PHSUBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHSUBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PHSUBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PHSUBW: bad operands") +} + +// PINSRB: Insert Byte. +// +// Forms: +// +// PINSRB imm8 r32 xmm +// PINSRB imm8 m8 xmm +func PINSRB(i, mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRB", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM8(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRB", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PINSRB: bad operands") +} + +// PINSRD: Insert Doubleword. 
+// +// Forms: +// +// PINSRD imm8 r32 xmm +// PINSRD imm8 m32 xmm +func PINSRD(i, mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRD", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRD", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PINSRD: bad operands") +} + +// PINSRQ: Insert Quadword. +// +// Forms: +// +// PINSRQ imm8 r64 xmm +// PINSRQ imm8 m64 xmm +func PINSRQ(i, mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR64(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRQ", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRQ", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PINSRQ: bad operands") +} + +// PINSRW: Insert Word. +// +// Forms: +// +// PINSRW imm8 r32 xmm +// PINSRW imm8 m16 xmm +func PINSRW(i, mr, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRW", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM16(mr) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PINSRW", + Operands: []operand.Op{i, mr, x}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PINSRW: bad operands") +} + +// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers. +// +// Forms: +// +// PMADDUBSW xmm xmm +// PMADDUBSW m128 xmm +func PMADDUBSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMADDUBSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMADDUBSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMADDUBSW: bad operands") +} + +// PMADDWL: Multiply and Add Packed Signed Word Integers. +// +// Forms: +// +// PMADDWL xmm xmm +// PMADDWL m128 xmm +func PMADDWL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMADDWL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMADDWL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMADDWL: bad operands") +} + +// PMAXSB: Maximum of Packed Signed Byte Integers. 
+// +// Forms: +// +// PMAXSB xmm xmm +// PMAXSB m128 xmm +func PMAXSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMAXSB: bad operands") +} + +// PMAXSD: Maximum of Packed Signed Doubleword Integers. +// +// Forms: +// +// PMAXSD xmm xmm +// PMAXSD m128 xmm +func PMAXSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMAXSD: bad operands") +} + +// PMAXSW: Maximum of Packed Signed Word Integers. +// +// Forms: +// +// PMAXSW xmm xmm +// PMAXSW m128 xmm +func PMAXSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMAXSW: bad operands") +} + +// PMAXUB: Maximum of Packed Unsigned Byte Integers. +// +// Forms: +// +// PMAXUB xmm xmm +// PMAXUB m128 xmm +func PMAXUB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXUB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXUB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMAXUB: bad operands") +} + +// PMAXUD: Maximum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMAXUD xmm xmm +// PMAXUD m128 xmm +func PMAXUD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXUD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXUD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMAXUD: bad operands") +} + +// PMAXUW: Maximum of Packed Unsigned Word Integers. 
+// +// Forms: +// +// PMAXUW xmm xmm +// PMAXUW m128 xmm +func PMAXUW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMAXUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMAXUW: bad operands") +} + +// PMINSB: Minimum of Packed Signed Byte Integers. +// +// Forms: +// +// PMINSB xmm xmm +// PMINSB m128 xmm +func PMINSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMINSB: bad operands") +} + +// PMINSD: Minimum of Packed Signed Doubleword Integers. +// +// Forms: +// +// PMINSD xmm xmm +// PMINSD m128 xmm +func PMINSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMINSD: bad operands") +} + +// PMINSW: Minimum of Packed Signed Word Integers. +// +// Forms: +// +// PMINSW xmm xmm +// PMINSW m128 xmm +func PMINSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMINSW: bad operands") +} + +// PMINUB: Minimum of Packed Unsigned Byte Integers. +// +// Forms: +// +// PMINUB xmm xmm +// PMINUB m128 xmm +func PMINUB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINUB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINUB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMINUB: bad operands") +} + +// PMINUD: Minimum of Packed Unsigned Doubleword Integers. 
+// +// Forms: +// +// PMINUD xmm xmm +// PMINUD m128 xmm +func PMINUD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINUD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINUD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMINUD: bad operands") +} + +// PMINUW: Minimum of Packed Unsigned Word Integers. +// +// Forms: +// +// PMINUW xmm xmm +// PMINUW m128 xmm +func PMINUW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMINUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMINUW: bad operands") +} + +// PMOVMSKB: Move Byte Mask. +// +// Forms: +// +// PMOVMSKB xmm r32 +func PMOVMSKB(x, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "PMOVMSKB", + Operands: []operand.Op{x, r}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("PMOVMSKB: bad operands") +} + +// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBD xmm xmm +// PMOVSXBD m32 xmm +func PMOVSXBD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXBD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXBD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVSXBD: bad operands") +} + +// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXBQ xmm xmm +// PMOVSXBQ m16 xmm +func PMOVSXBQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXBQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM16(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXBQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVSXBQ: bad operands") +} + +// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension. 
+// +// Forms: +// +// PMOVSXBW xmm xmm +// PMOVSXBW m64 xmm +func PMOVSXBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVSXBW: bad operands") +} + +// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXDQ xmm xmm +// PMOVSXDQ m64 xmm +func PMOVSXDQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVSXDQ: bad operands") +} + +// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXWD xmm xmm +// PMOVSXWD m64 xmm +func PMOVSXWD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXWD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXWD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVSXWD: bad operands") +} + +// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// PMOVSXWQ xmm xmm +// PMOVSXWQ m32 xmm +func PMOVSXWQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXWQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVSXWQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVSXWQ: bad operands") +} + +// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBD xmm xmm +// PMOVZXBD m32 xmm +func PMOVZXBD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXBD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXBD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVZXBD: bad operands") +} + +// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension. 
+// +// Forms: +// +// PMOVZXBQ xmm xmm +// PMOVZXBQ m16 xmm +func PMOVZXBQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXBQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM16(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXBQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVZXBQ: bad operands") +} + +// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension. +// +// Forms: +// +// PMOVZXBW xmm xmm +// PMOVZXBW m64 xmm +func PMOVZXBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVZXBW: bad operands") +} + +// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXDQ xmm xmm +// PMOVZXDQ m64 xmm +func PMOVZXDQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVZXDQ: bad operands") +} + +// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXWD xmm xmm +// PMOVZXWD m64 xmm +func PMOVZXWD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXWD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXWD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVZXWD: bad operands") +} + +// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// PMOVZXWQ xmm xmm +// PMOVZXWQ m32 xmm +func PMOVZXWQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXWQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMOVZXWQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMOVZXWQ: bad operands") +} + +// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result. 
+// +// Forms: +// +// PMULDQ xmm xmm +// PMULDQ m128 xmm +func PMULDQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULDQ: bad operands") +} + +// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale. +// +// Forms: +// +// PMULHRSW xmm xmm +// PMULHRSW m128 xmm +func PMULHRSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULHRSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULHRSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULHRSW: bad operands") +} + +// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result. +// +// Forms: +// +// PMULHUW xmm xmm +// PMULHUW m128 xmm +func PMULHUW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULHUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULHUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULHUW: bad operands") +} + +// PMULHW: Multiply Packed Signed Word Integers and Store High Result. +// +// Forms: +// +// PMULHW xmm xmm +// PMULHW m128 xmm +func PMULHW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULHW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULHW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULHW: bad operands") +} + +// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result. +// +// Forms: +// +// PMULLD xmm xmm +// PMULLD m128 xmm +func PMULLD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULLD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULLD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULLD: bad operands") +} + +// PMULLW: Multiply Packed Signed Word Integers and Store Low Result. 
+// +// Forms: +// +// PMULLW xmm xmm +// PMULLW m128 xmm +func PMULLW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULLW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULLW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULLW: bad operands") +} + +// PMULULQ: Multiply Packed Unsigned Doubleword Integers. +// +// Forms: +// +// PMULULQ xmm xmm +// PMULULQ m128 xmm +func PMULULQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULULQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PMULULQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PMULULQ: bad operands") +} + +// POPCNTL: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTL r32 r32 +// POPCNTL m32 r32 +func POPCNTL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "POPCNTL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "POPCNTL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("POPCNTL: bad operands") +} + +// POPCNTQ: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTQ r64 r64 +// POPCNTQ m64 r64 +func POPCNTQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "POPCNTQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "POPCNTQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("POPCNTQ: bad operands") +} + +// POPCNTW: Count of Number of Bits Set to 1. +// +// Forms: +// +// POPCNTW r16 r16 +// POPCNTW m16 r16 +func POPCNTW(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "POPCNTW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "POPCNTW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("POPCNTW: bad operands") +} + +// POPQ: Pop a Value from the Stack. 
+// +// Forms: +// +// POPQ r64 +// POPQ m64 +func POPQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "POPQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "POPQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("POPQ: bad operands") +} + +// POPW: Pop a Value from the Stack. +// +// Forms: +// +// POPW r16 +// POPW m16 +func POPW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "POPW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "POPW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("POPW: bad operands") +} + +// POR: Packed Bitwise Logical OR. +// +// Forms: +// +// POR xmm xmm +// POR m128 xmm +func POR(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "POR", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "POR", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("POR: bad operands") +} + +// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint. +// +// Forms: +// +// PREFETCHNTA m8 +func PREFETCHNTA(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM8(m): + return &intrep.Instruction{ + Opcode: "PREFETCHNTA", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PREFETCHNTA: bad operands") +} + +// PREFETCHT0: Prefetch Data Into Caches using T0 Hint. +// +// Forms: +// +// PREFETCHT0 m8 +func PREFETCHT0(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM8(m): + return &intrep.Instruction{ + Opcode: "PREFETCHT0", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PREFETCHT0: bad operands") +} + +// PREFETCHT1: Prefetch Data Into Caches using T1 Hint. +// +// Forms: +// +// PREFETCHT1 m8 +func PREFETCHT1(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM8(m): + return &intrep.Instruction{ + Opcode: "PREFETCHT1", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PREFETCHT1: bad operands") +} + +// PREFETCHT2: Prefetch Data Into Caches using T2 Hint. +// +// Forms: +// +// PREFETCHT2 m8 +func PREFETCHT2(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM8(m): + return &intrep.Instruction{ + Opcode: "PREFETCHT2", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PREFETCHT2: bad operands") +} + +// PSADBW: Compute Sum of Absolute Differences. 
+// +// Forms: +// +// PSADBW xmm xmm +// PSADBW m128 xmm +func PSADBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSADBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSADBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSADBW: bad operands") +} + +// PSHUFB: Packed Shuffle Bytes. +// +// Forms: +// +// PSHUFB xmm xmm +// PSHUFB m128 xmm +func PSHUFB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSHUFB: bad operands") +} + +// PSHUFD: Shuffle Packed Doublewords. +// +// Forms: +// +// PSHUFD imm8 xmm xmm +// PSHUFD imm8 m128 xmm +func PSHUFD(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSHUFD: bad operands") +} + +// PSHUFHW: Shuffle Packed High Words. +// +// Forms: +// +// PSHUFHW imm8 xmm xmm +// PSHUFHW imm8 m128 xmm +func PSHUFHW(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFHW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFHW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSHUFHW: bad operands") +} + +// PSHUFL: Shuffle Packed Doublewords. +// +// Forms: +// +// PSHUFL imm8 xmm xmm +// PSHUFL imm8 m128 xmm +func PSHUFL(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFL", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFL", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSHUFL: bad operands") +} + +// PSHUFLW: Shuffle Packed Low Words. 
+// +// Forms: +// +// PSHUFLW imm8 xmm xmm +// PSHUFLW imm8 m128 xmm +func PSHUFLW(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFLW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSHUFLW", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSHUFLW: bad operands") +} + +// PSIGNB: Packed Sign of Byte Integers. +// +// Forms: +// +// PSIGNB xmm xmm +// PSIGNB m128 xmm +func PSIGNB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSIGNB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSIGNB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSIGNB: bad operands") +} + +// PSIGND: Packed Sign of Doubleword Integers. +// +// Forms: +// +// PSIGND xmm xmm +// PSIGND m128 xmm +func PSIGND(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSIGND", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSIGND", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSIGND: bad operands") +} + +// PSIGNW: Packed Sign of Word Integers. +// +// Forms: +// +// PSIGNW xmm xmm +// PSIGNW m128 xmm +func PSIGNW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSIGNW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSIGNW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSIGNW: bad operands") +} + +// PSLLDQ: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// PSLLDQ imm8 xmm +func PSLLDQ(i, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLDQ", + Operands: []operand.Op{i, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSLLDQ: bad operands") +} + +// PSLLL: Shift Packed Doubleword Data Left Logical. 
+// +// Forms: +// +// PSLLL imm8 xmm +// PSLLL xmm xmm +// PSLLL m128 xmm +func PSLLL(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSLLL: bad operands") +} + +// PSLLO: Shift Packed Double Quadword Left Logical. +// +// Forms: +// +// PSLLO imm8 xmm +func PSLLO(i, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLO", + Operands: []operand.Op{i, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSLLO: bad operands") +} + +// PSLLQ: Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// PSLLQ imm8 xmm +// PSLLQ xmm xmm +// PSLLQ m128 xmm +func PSLLQ(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLQ", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLQ", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLQ", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSLLQ: bad operands") +} + +// PSLLW: Shift Packed Word Data Left Logical. +// +// Forms: +// +// PSLLW imm8 xmm +// PSLLW xmm xmm +// PSLLW m128 xmm +func PSLLW(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSLLW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSLLW: bad operands") +} + +// PSRAL: Shift Packed Doubleword Data Right Arithmetic. 
+// +// Forms: +// +// PSRAL imm8 xmm +// PSRAL xmm xmm +// PSRAL m128 xmm +func PSRAL(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRAL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRAL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRAL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRAL: bad operands") +} + +// PSRAW: Shift Packed Word Data Right Arithmetic. +// +// Forms: +// +// PSRAW imm8 xmm +// PSRAW xmm xmm +// PSRAW m128 xmm +func PSRAW(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRAW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRAW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRAW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRAW: bad operands") +} + +// PSRLDQ: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// PSRLDQ imm8 xmm +func PSRLDQ(i, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLDQ", + Operands: []operand.Op{i, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRLDQ: bad operands") +} + +// PSRLL: Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// PSRLL imm8 xmm +// PSRLL xmm xmm +// PSRLL m128 xmm +func PSRLL(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLL", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRLL: bad operands") +} + +// PSRLO: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// PSRLO imm8 xmm +func PSRLO(i, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLO", + Operands: []operand.Op{i, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRLO: bad operands") +} + +// PSRLQ: Shift Packed Quadword Data Right Logical. 
+// +// Forms: +// +// PSRLQ imm8 xmm +// PSRLQ xmm xmm +// PSRLQ m128 xmm +func PSRLQ(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLQ", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLQ", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLQ", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRLQ: bad operands") +} + +// PSRLW: Shift Packed Word Data Right Logical. +// +// Forms: +// +// PSRLW imm8 xmm +// PSRLW xmm xmm +// PSRLW m128 xmm +func PSRLW(imx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSRLW", + Operands: []operand.Op{imx, x}, + Inputs: []operand.Op{imx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSRLW: bad operands") +} + +// PSUBB: Subtract Packed Byte Integers. +// +// Forms: +// +// PSUBB xmm xmm +// PSUBB m128 xmm +func PSUBB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBB: bad operands") +} + +// PSUBL: Subtract Packed Doubleword Integers. +// +// Forms: +// +// PSUBL xmm xmm +// PSUBL m128 xmm +func PSUBL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBL: bad operands") +} + +// PSUBQ: Subtract Packed Quadword Integers. 
+// +// Forms: +// +// PSUBQ xmm xmm +// PSUBQ m128 xmm +func PSUBQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBQ: bad operands") +} + +// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// PSUBSB xmm xmm +// PSUBSB m128 xmm +func PSUBSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBSB: bad operands") +} + +// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation. +// +// Forms: +// +// PSUBSW xmm xmm +// PSUBSW m128 xmm +func PSUBSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBSW: bad operands") +} + +// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// PSUBUSB xmm xmm +// PSUBUSB m128 xmm +func PSUBUSB(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBUSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBUSB", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBUSB: bad operands") +} + +// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation. +// +// Forms: +// +// PSUBUSW xmm xmm +// PSUBUSW m128 xmm +func PSUBUSW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBUSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBUSW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBUSW: bad operands") +} + +// PSUBW: Subtract Packed Word Integers. 
+// +// Forms: +// +// PSUBW xmm xmm +// PSUBW m128 xmm +func PSUBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PSUBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PSUBW: bad operands") +} + +// PTEST: Packed Logical Compare. +// +// Forms: +// +// PTEST xmm xmm +// PTEST m128 xmm +func PTEST(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PTEST", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PTEST", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PTEST: bad operands") +} + +// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words. +// +// Forms: +// +// PUNPCKHBW xmm xmm +// PUNPCKHBW m128 xmm +func PUNPCKHBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKHBW: bad operands") +} + +// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords. +// +// Forms: +// +// PUNPCKHLQ xmm xmm +// PUNPCKHLQ m128 xmm +func PUNPCKHLQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHLQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHLQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKHLQ: bad operands") +} + +// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords. +// +// Forms: +// +// PUNPCKHQDQ xmm xmm +// PUNPCKHQDQ m128 xmm +func PUNPCKHQDQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHQDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHQDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKHQDQ: bad operands") +} + +// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords. 
+// +// Forms: +// +// PUNPCKHWL xmm xmm +// PUNPCKHWL m128 xmm +func PUNPCKHWL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHWL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKHWL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKHWL: bad operands") +} + +// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words. +// +// Forms: +// +// PUNPCKLBW xmm xmm +// PUNPCKLBW m128 xmm +func PUNPCKLBW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLBW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKLBW: bad operands") +} + +// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords. +// +// Forms: +// +// PUNPCKLLQ xmm xmm +// PUNPCKLLQ m128 xmm +func PUNPCKLLQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLLQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLLQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKLLQ: bad operands") +} + +// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords. +// +// Forms: +// +// PUNPCKLQDQ xmm xmm +// PUNPCKLQDQ m128 xmm +func PUNPCKLQDQ(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLQDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLQDQ", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKLQDQ: bad operands") +} + +// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords. +// +// Forms: +// +// PUNPCKLWL xmm xmm +// PUNPCKLWL m128 xmm +func PUNPCKLWL(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLWL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PUNPCKLWL", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PUNPCKLWL: bad operands") +} + +// PUSHQ: Push Value Onto the Stack. 
+// +// Forms: +// +// PUSHQ imm8 +// PUSHQ imm32 +// PUSHQ r64 +// PUSHQ m64 +func PUSHQ(imr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr): + return &intrep.Instruction{ + Opcode: "PUSHQ", + Operands: []operand.Op{imr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM32(imr): + return &intrep.Instruction{ + Opcode: "PUSHQ", + Operands: []operand.Op{imr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(imr): + return &intrep.Instruction{ + Opcode: "PUSHQ", + Operands: []operand.Op{imr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(imr): + return &intrep.Instruction{ + Opcode: "PUSHQ", + Operands: []operand.Op{imr}, + Inputs: []operand.Op{imr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PUSHQ: bad operands") +} + +// PUSHW: Push Value Onto the Stack. +// +// Forms: +// +// PUSHW r16 +// PUSHW m16 +func PUSHW(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "PUSHW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "PUSHW", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("PUSHW: bad operands") +} + +// PXOR: Packed Bitwise Logical Exclusive OR. +// +// Forms: +// +// PXOR xmm xmm +// PXOR m128 xmm +func PXOR(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PXOR", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "PXOR", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("PXOR: bad operands") +} + +// RCLB: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLB 1 r8 +// RCLB imm8 r8 +// RCLB cl r8 +// RCLB 1 m8 +// RCLB imm8 m8 +// RCLB cl m8 +func RCLB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RCLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RCLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RCLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RCLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RCLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RCLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCLB: bad operands") +} + +// RCLL: Rotate Left through Carry Flag. 
+// +// Forms: +// +// RCLL 1 r32 +// RCLL imm8 r32 +// RCLL cl r32 +// RCLL 1 m32 +// RCLL imm8 m32 +// RCLL cl m32 +func RCLL(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RCLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RCLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RCLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RCLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RCLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RCLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCLL: bad operands") +} + +// RCLQ: Rotate Left through Carry Flag. +// +// Forms: +// +// RCLQ 1 r64 +// RCLQ imm8 r64 +// RCLQ cl r64 +// RCLQ 1 m64 +// RCLQ imm8 m64 +// RCLQ cl m64 +func RCLQ(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RCLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RCLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RCLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RCLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RCLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RCLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCLQ: bad operands") +} + +// RCLW: Rotate Left through Carry Flag. 
+// +// Forms: +// +// RCLW 1 r16 +// RCLW imm8 r16 +// RCLW cl r16 +// RCLW 1 m16 +// RCLW imm8 m16 +// RCLW cl m16 +func RCLW(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RCLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RCLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RCLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RCLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RCLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RCLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCLW: bad operands") +} + +// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// RCPPS xmm xmm +// RCPPS m128 xmm +func RCPPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RCPPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RCPPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("RCPPS: bad operands") +} + +// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// RCPSS xmm xmm +// RCPSS m32 xmm +func RCPSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RCPSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RCPSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("RCPSS: bad operands") +} + +// RCRB: Rotate Right through Carry Flag. 
+// +// Forms: +// +// RCRB 1 r8 +// RCRB imm8 r8 +// RCRB cl r8 +// RCRB 1 m8 +// RCRB imm8 m8 +// RCRB cl m8 +func RCRB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RCRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RCRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RCRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RCRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RCRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RCRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCRB: bad operands") +} + +// RCRL: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRL 1 r32 +// RCRL imm8 r32 +// RCRL cl r32 +// RCRL 1 m32 +// RCRL imm8 m32 +// RCRL cl m32 +func RCRL(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RCRL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RCRL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RCRL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RCRL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RCRL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RCRL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCRL: bad operands") +} + +// RCRQ: Rotate Right through Carry Flag. 
+// +// Forms: +// +// RCRQ 1 r64 +// RCRQ imm8 r64 +// RCRQ cl r64 +// RCRQ 1 m64 +// RCRQ imm8 m64 +// RCRQ cl m64 +func RCRQ(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RCRQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RCRQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RCRQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RCRQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RCRQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RCRQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCRQ: bad operands") +} + +// RCRW: Rotate Right through Carry Flag. +// +// Forms: +// +// RCRW 1 r16 +// RCRW imm8 r16 +// RCRW cl r16 +// RCRW 1 m16 +// RCRW imm8 m16 +// RCRW cl m16 +func RCRW(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RCRW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RCRW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RCRW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RCRW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RCRW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RCRW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RCRW: bad operands") +} + +// RDRANDL: Read Random Number. +// +// Forms: +// +// RDRANDL r32 +func RDRANDL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "RDRANDL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RDRANDL: bad operands") +} + +// RDRANDQ: Read Random Number. 
+// +// Forms: +// +// RDRANDQ r64 +func RDRANDQ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "RDRANDQ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RDRANDQ: bad operands") +} + +// RDRANDW: Read Random Number. +// +// Forms: +// +// RDRANDW r16 +func RDRANDW(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "RDRANDW", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RDRANDW: bad operands") +} + +// RDSEEDL: Read Random SEED. +// +// Forms: +// +// RDSEEDL r32 +func RDSEEDL(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "RDSEEDL", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RDSEEDL: bad operands") +} + +// RDSEEDQ: Read Random SEED. +// +// Forms: +// +// RDSEEDQ r64 +func RDSEEDQ(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "RDSEEDQ", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RDSEEDQ: bad operands") +} + +// RDSEEDW: Read Random SEED. +// +// Forms: +// +// RDSEEDW r16 +func RDSEEDW(r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "RDSEEDW", + Operands: []operand.Op{r}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RDSEEDW: bad operands") +} + +// RDTSC: Read Time-Stamp Counter. +// +// Forms: +// +// RDTSC +func RDTSC() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "RDTSC", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil +} + +// RDTSCP: Read Time-Stamp Counter and Processor ID. +// +// Forms: +// +// RDTSCP +func RDTSCP() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "RDTSCP", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{reg.EAX, reg.ECX, reg.EDX}, + }, nil +} + +// RET: Return from Procedure. +// +// Forms: +// +// RET +func RET() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "RET", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// RETFL: Return from Procedure. +// +// Forms: +// +// RETFL imm16 +func RETFL(i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(i): + return &intrep.Instruction{ + Opcode: "RETFL", + Operands: []operand.Op{i}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("RETFL: bad operands") +} + +// RETFQ: Return from Procedure. +// +// Forms: +// +// RETFQ imm16 +func RETFQ(i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(i): + return &intrep.Instruction{ + Opcode: "RETFQ", + Operands: []operand.Op{i}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("RETFQ: bad operands") +} + +// RETFW: Return from Procedure. 
+// +// Forms: +// +// RETFW imm16 +func RETFW(i operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(i): + return &intrep.Instruction{ + Opcode: "RETFW", + Operands: []operand.Op{i}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("RETFW: bad operands") +} + +// ROLB: Rotate Left. +// +// Forms: +// +// ROLB 1 r8 +// ROLB imm8 r8 +// ROLB cl r8 +// ROLB 1 m8 +// ROLB imm8 m8 +// ROLB cl m8 +func ROLB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "ROLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "ROLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "ROLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "ROLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "ROLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "ROLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ROLB: bad operands") +} + +// ROLL: Rotate Left. +// +// Forms: +// +// ROLL 1 r32 +// ROLL imm8 r32 +// ROLL cl r32 +// ROLL 1 m32 +// ROLL imm8 m32 +// ROLL cl m32 +func ROLL(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "ROLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "ROLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "ROLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "ROLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "ROLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "ROLL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ROLL: bad operands") +} + +// ROLQ: Rotate Left. 
+// +// Forms: +// +// ROLQ 1 r64 +// ROLQ imm8 r64 +// ROLQ cl r64 +// ROLQ 1 m64 +// ROLQ imm8 m64 +// ROLQ cl m64 +func ROLQ(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ROLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ROLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "ROLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ROLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ROLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "ROLQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ROLQ: bad operands") +} + +// ROLW: Rotate Left. +// +// Forms: +// +// ROLW 1 r16 +// ROLW imm8 r16 +// ROLW cl r16 +// ROLW 1 m16 +// ROLW imm8 m16 +// ROLW cl m16 +func ROLW(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "ROLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "ROLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "ROLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "ROLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "ROLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "ROLW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("ROLW: bad operands") +} + +// RORB: Rotate Right. 
+// +// Forms: +// +// RORB 1 r8 +// RORB imm8 r8 +// RORB cl r8 +// RORB 1 m8 +// RORB imm8 m8 +// RORB cl m8 +func RORB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RORB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RORB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "RORB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RORB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RORB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "RORB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RORB: bad operands") +} + +// RORL: Rotate Right. +// +// Forms: +// +// RORL 1 r32 +// RORL imm8 r32 +// RORL cl r32 +// RORL 1 m32 +// RORL imm8 m32 +// RORL cl m32 +func RORL(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RORL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RORL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "RORL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RORL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RORL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "RORL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RORL: bad operands") +} + +// RORQ: Rotate Right. 
+// +// Forms: +// +// RORQ 1 r64 +// RORQ imm8 r64 +// RORQ cl r64 +// RORQ 1 m64 +// RORQ imm8 m64 +// RORQ cl m64 +func RORQ(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RORQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RORQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "RORQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RORQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RORQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "RORQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RORQ: bad operands") +} + +// RORW: Rotate Right. +// +// Forms: +// +// RORW 1 r16 +// RORW imm8 r16 +// RORW cl r16 +// RORW 1 m16 +// RORW imm8 m16 +// RORW cl m16 +func RORW(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RORW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RORW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "RORW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RORW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RORW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "RORW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("RORW: bad operands") +} + +// RORXL: Rotate Right Logical Without Affecting Flags. 
+// +// Forms: +// +// RORXL imm8 r32 r32 +// RORXL imm8 m32 r32 +func RORXL(i, mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "RORXL", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "RORXL", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RORXL: bad operands") +} + +// RORXQ: Rotate Right Logical Without Affecting Flags. +// +// Forms: +// +// RORXQ imm8 r64 r64 +// RORXQ imm8 m64 r64 +func RORXQ(i, mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "RORXQ", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "RORXQ", + Operands: []operand.Op{i, mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("RORXQ: bad operands") +} + +// ROUNDPD: Round Packed Double Precision Floating-Point Values. +// +// Forms: +// +// ROUNDPD imm8 xmm xmm +// ROUNDPD imm8 m128 xmm +func ROUNDPD(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ROUNDPD: bad operands") +} + +// ROUNDPS: Round Packed Single Precision Floating-Point Values. +// +// Forms: +// +// ROUNDPS imm8 xmm xmm +// ROUNDPS imm8 m128 xmm +func ROUNDPS(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ROUNDPS: bad operands") +} + +// ROUNDSD: Round Scalar Double Precision Floating-Point Values. +// +// Forms: +// +// ROUNDSD imm8 xmm xmm +// ROUNDSD imm8 m64 xmm +func ROUNDSD(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDSD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDSD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ROUNDSD: bad operands") +} + +// ROUNDSS: Round Scalar Single Precision Floating-Point Values. 
+// +// Forms: +// +// ROUNDSS imm8 xmm xmm +// ROUNDSS imm8 m32 xmm +func ROUNDSS(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDSS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "ROUNDSS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("ROUNDSS: bad operands") +} + +// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// RSQRTPS xmm xmm +// RSQRTPS m128 xmm +func RSQRTPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RSQRTPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RSQRTPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("RSQRTPS: bad operands") +} + +// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// RSQRTSS xmm xmm +// RSQRTSS m32 xmm +func RSQRTSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RSQRTSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "RSQRTSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("RSQRTSS: bad operands") +} + +// SALB: Arithmetic Shift Left. +// +// Forms: +// +// SALB 1 r8 +// SALB imm8 r8 +// SALB cl r8 +// SALB 1 m8 +// SALB imm8 m8 +// SALB cl m8 +func SALB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SALB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SALB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SALB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SALB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SALB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SALB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SALB: bad operands") +} + +// SALL: Arithmetic Shift Left. 
+// +// Forms: +// +// SALL 1 r32 +// SALL imm8 r32 +// SALL cl r32 +// SALL 1 m32 +// SALL imm8 m32 +// SALL cl m32 +func SALL(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "SALL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "SALL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "SALL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "SALL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "SALL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "SALL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SALL: bad operands") +} + +// SALQ: Arithmetic Shift Left. +// +// Forms: +// +// SALQ 1 r64 +// SALQ imm8 r64 +// SALQ cl r64 +// SALQ 1 m64 +// SALQ imm8 m64 +// SALQ cl m64 +func SALQ(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SALQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SALQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SALQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SALQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SALQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SALQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SALQ: bad operands") +} + +// SALW: Arithmetic Shift Left. 
+// +// Forms: +// +// SALW 1 r16 +// SALW imm8 r16 +// SALW cl r16 +// SALW 1 m16 +// SALW imm8 m16 +// SALW cl m16 +func SALW(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "SALW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "SALW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "SALW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "SALW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "SALW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "SALW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SALW: bad operands") +} + +// SARB: Arithmetic Shift Right. +// +// Forms: +// +// SARB 1 r8 +// SARB imm8 r8 +// SARB cl r8 +// SARB 1 m8 +// SARB imm8 m8 +// SARB cl m8 +func SARB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SARB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SARB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SARB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SARB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SARB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SARB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SARB: bad operands") +} + +// SARL: Arithmetic Shift Right. 
+// +// Forms: +// +// SARL 1 r32 +// SARL imm8 r32 +// SARL cl r32 +// SARL 1 m32 +// SARL imm8 m32 +// SARL cl m32 +func SARL(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "SARL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "SARL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "SARL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "SARL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "SARL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "SARL", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SARL: bad operands") +} + +// SARQ: Arithmetic Shift Right. +// +// Forms: +// +// SARQ 1 r64 +// SARQ imm8 r64 +// SARQ cl r64 +// SARQ 1 m64 +// SARQ imm8 m64 +// SARQ cl m64 +func SARQ(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SARQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SARQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SARQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SARQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SARQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SARQ", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SARQ: bad operands") +} + +// SARW: Arithmetic Shift Right. 
+// +// Forms: +// +// SARW 1 r16 +// SARW imm8 r16 +// SARW cl r16 +// SARW 1 m16 +// SARW imm8 m16 +// SARW cl m16 +func SARW(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "SARW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "SARW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "SARW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "SARW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "SARW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "SARW", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SARW: bad operands") +} + +// SARXL: Arithmetic Shift Right Without Affecting Flags. +// +// Forms: +// +// SARXL r32 r32 r32 +// SARXL r32 m32 r32 +func SARXL(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "SARXL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "SARXL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("SARXL: bad operands") +} + +// SARXQ: Arithmetic Shift Right Without Affecting Flags. +// +// Forms: +// +// SARXQ r64 r64 r64 +// SARXQ r64 m64 r64 +func SARXQ(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "SARXQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "SARXQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("SARXQ: bad operands") +} + +// SBBB: Subtract with Borrow. 
+// +// Forms: +// +// SBBB imm8 al +// SBBB imm8 r8 +// SBBB r8 r8 +// SBBB m8 r8 +// SBBB imm8 m8 +// SBBB r8 m8 +func SBBB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "SBBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "SBBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "SBBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "SBBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "SBBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "SBBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("SBBB: bad operands") +} + +// SBBL: Subtract with Borrow. +// +// Forms: +// +// SBBL imm32 eax +// SBBL imm8 r32 +// SBBL imm32 r32 +// SBBL r32 r32 +// SBBL m32 r32 +// SBBL imm8 m32 +// SBBL imm32 m32 +// SBBL r32 m32 +func SBBL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "SBBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("SBBL: bad operands") +} + +// SBBQ: Subtract with Borrow. 
+// +// Forms: +// +// SBBQ imm32 rax +// SBBQ imm8 r64 +// SBBQ imm32 r64 +// SBBQ r64 r64 +// SBBQ m64 r64 +// SBBQ imm8 m64 +// SBBQ imm32 m64 +// SBBQ r64 m64 +func SBBQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SBBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SBBQ: bad operands") +} + +// SBBW: Subtract with Borrow. 
+// +// Forms: +// +// SBBW imm16 ax +// SBBW imm8 r16 +// SBBW imm16 r16 +// SBBW r16 r16 +// SBBW m16 r16 +// SBBW imm8 m16 +// SBBW imm16 m16 +// SBBW r16 m16 +func SBBW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "SBBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("SBBW: bad operands") +} + +// SETCC: Set byte if above or equal (CF == 0). +// +// Forms: +// +// SETCC r8 +// SETCC m8 +func SETCC(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETCC", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETCC", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETCC: bad operands") +} + +// SETCS: Set byte if below (CF == 1). +// +// Forms: +// +// SETCS r8 +// SETCS m8 +func SETCS(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETCS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETCS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETCS: bad operands") +} + +// SETEQ: Set byte if equal (ZF == 1). 
+// +// Forms: +// +// SETEQ r8 +// SETEQ m8 +func SETEQ(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETEQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETEQ", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETEQ: bad operands") +} + +// SETGE: Set byte if greater or equal (SF == OF). +// +// Forms: +// +// SETGE r8 +// SETGE m8 +func SETGE(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETGE", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETGE", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETGE: bad operands") +} + +// SETGT: Set byte if greater (ZF == 0 and SF == OF). +// +// Forms: +// +// SETGT r8 +// SETGT m8 +func SETGT(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETGT", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETGT", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETGT: bad operands") +} + +// SETHI: Set byte if above (CF == 0 and ZF == 0). +// +// Forms: +// +// SETHI r8 +// SETHI m8 +func SETHI(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETHI", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETHI", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETHI: bad operands") +} + +// SETLE: Set byte if less or equal (ZF == 1 or SF != OF). +// +// Forms: +// +// SETLE r8 +// SETLE m8 +func SETLE(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETLE", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETLE", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETLE: bad operands") +} + +// SETLS: Set byte if below or equal (CF == 1 or ZF == 1). +// +// Forms: +// +// SETLS r8 +// SETLS m8 +func SETLS(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETLS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETLS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETLS: bad operands") +} + +// SETLT: Set byte if less (SF != OF). 
+// +// Forms: +// +// SETLT r8 +// SETLT m8 +func SETLT(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETLT", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETLT", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETLT: bad operands") +} + +// SETMI: Set byte if sign (SF == 1). +// +// Forms: +// +// SETMI r8 +// SETMI m8 +func SETMI(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETMI", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETMI", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETMI: bad operands") +} + +// SETNE: Set byte if not equal (ZF == 0). +// +// Forms: +// +// SETNE r8 +// SETNE m8 +func SETNE(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETNE", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETNE", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETNE: bad operands") +} + +// SETOC: Set byte if not overflow (OF == 0). +// +// Forms: +// +// SETOC r8 +// SETOC m8 +func SETOC(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETOC", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETOC", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETOC: bad operands") +} + +// SETOS: Set byte if overflow (OF == 1). +// +// Forms: +// +// SETOS r8 +// SETOS m8 +func SETOS(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETOS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETOS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETOS: bad operands") +} + +// SETPC: Set byte if not parity (PF == 0). +// +// Forms: +// +// SETPC r8 +// SETPC m8 +func SETPC(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETPC", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETPC", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETPC: bad operands") +} + +// SETPL: Set byte if not sign (SF == 0). 
+// +// Forms: +// +// SETPL r8 +// SETPL m8 +func SETPL(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETPL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETPL", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETPL: bad operands") +} + +// SETPS: Set byte if parity (PF == 1). +// +// Forms: +// +// SETPS r8 +// SETPS m8 +func SETPS(mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SETPS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SETPS", + Operands: []operand.Op{mr}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SETPS: bad operands") +} + +// SFENCE: Store Fence. +// +// Forms: +// +// SFENCE +func SFENCE() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "SFENCE", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords. +// +// Forms: +// +// SHA1MSG1 xmm xmm +// SHA1MSG1 m128 xmm +func SHA1MSG1(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1MSG1", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1MSG1", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHA1MSG1: bad operands") +} + +// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords. +// +// Forms: +// +// SHA1MSG2 xmm xmm +// SHA1MSG2 m128 xmm +func SHA1MSG2(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1MSG2", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1MSG2", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHA1MSG2: bad operands") +} + +// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds. +// +// Forms: +// +// SHA1NEXTE xmm xmm +// SHA1NEXTE m128 xmm +func SHA1NEXTE(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1NEXTE", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1NEXTE", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHA1NEXTE: bad operands") +} + +// SHA1RNDS4: Perform Four Rounds of SHA1 Operation. 
+// +// Forms: +// +// SHA1RNDS4 imm2u xmm xmm +// SHA1RNDS4 imm2u m128 xmm +func SHA1RNDS4(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM2U(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1RNDS4", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM2U(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA1RNDS4", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHA1RNDS4: bad operands") +} + +// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords. +// +// Forms: +// +// SHA256MSG1 xmm xmm +// SHA256MSG1 m128 xmm +func SHA256MSG1(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA256MSG1", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA256MSG1", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHA256MSG1: bad operands") +} + +// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords. +// +// Forms: +// +// SHA256MSG2 xmm xmm +// SHA256MSG2 m128 xmm +func SHA256MSG2(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA256MSG2", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHA256MSG2", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHA256MSG2: bad operands") +} + +// SHA256RNDS2: Perform Two Rounds of SHA256 Operation. +// +// Forms: +// +// SHA256RNDS2 xmm0 xmm xmm +// SHA256RNDS2 xmm0 m128 xmm +func SHA256RNDS2(x, mx, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM0(x) && operand.IsXMM(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "SHA256RNDS2", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsXMM0(x) && operand.IsM128(mx) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "SHA256RNDS2", + Operands: []operand.Op{x, mx, x1}, + Inputs: []operand.Op{x, mx, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("SHA256RNDS2: bad operands") +} + +// SHLB: Logical Shift Left. 
+// +// Forms: +// +// SHLB 1 r8 +// SHLB imm8 r8 +// SHLB cl r8 +// SHLB 1 m8 +// SHLB imm8 m8 +// SHLB cl m8 +func SHLB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SHLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SHLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SHLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SHLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SHLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SHLB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SHLB: bad operands") +} + +// SHLL: Logical Shift Left. +// +// Forms: +// +// SHLL 1 r32 +// SHLL imm8 r32 +// SHLL cl r32 +// SHLL 1 m32 +// SHLL imm8 m32 +// SHLL cl m32 +// SHLL imm8 r32 r32 +// SHLL cl r32 r32 +// SHLL imm8 r32 m32 +// SHLL cl r32 m32 +func SHLL(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR32(ops[1]) && operand.IsR32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR32(ops[1]) && operand.IsR32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && 
operand.IsR32(ops[1]) && operand.IsM32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR32(ops[1]) && operand.IsM32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("SHLL: bad operands") +} + +// SHLQ: Logical Shift Left. +// +// Forms: +// +// SHLQ 1 r64 +// SHLQ imm8 r64 +// SHLQ cl r64 +// SHLQ 1 m64 +// SHLQ imm8 m64 +// SHLQ cl m64 +// SHLQ imm8 r64 r64 +// SHLQ cl r64 r64 +// SHLQ imm8 r64 m64 +// SHLQ cl r64 m64 +func SHLQ(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR64(ops[1]) && operand.IsR64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR64(ops[1]) && operand.IsR64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR64(ops[1]) && operand.IsM64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR64(ops[1]) && operand.IsM64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("SHLQ: bad operands") +} + +// SHLW: Logical Shift Left. 
+// +// Forms: +// +// SHLW 1 r16 +// SHLW imm8 r16 +// SHLW cl r16 +// SHLW 1 m16 +// SHLW imm8 m16 +// SHLW cl m16 +// SHLW imm8 r16 r16 +// SHLW cl r16 r16 +// SHLW imm8 r16 m16 +// SHLW cl r16 m16 +func SHLW(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsM16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsM16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsM16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR16(ops[1]) && operand.IsR16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR16(ops[1]) && operand.IsR16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR16(ops[1]) && operand.IsM16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR16(ops[1]) && operand.IsM16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHLW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("SHLW: bad operands") +} + +// SHLXL: Logical Shift Left Without Affecting Flags. +// +// Forms: +// +// SHLXL r32 r32 r32 +// SHLXL r32 m32 r32 +func SHLXL(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "SHLXL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "SHLXL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("SHLXL: bad operands") +} + +// SHLXQ: Logical Shift Left Without Affecting Flags. 
+// +// Forms: +// +// SHLXQ r64 r64 r64 +// SHLXQ r64 m64 r64 +func SHLXQ(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "SHLXQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "SHLXQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("SHLXQ: bad operands") +} + +// SHRB: Logical Shift Right. +// +// Forms: +// +// SHRB 1 r8 +// SHRB imm8 r8 +// SHRB cl r8 +// SHRB 1 m8 +// SHRB imm8 m8 +// SHRB cl m8 +func SHRB(ci, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.Is1(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SHRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SHRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "SHRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.Is1(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SHRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SHRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsCL(ci) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "SHRB", + Operands: []operand.Op{ci, mr}, + Inputs: []operand.Op{ci, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SHRB: bad operands") +} + +// SHRL: Logical Shift Right. 
+// +// Forms: +// +// SHRL 1 r32 +// SHRL imm8 r32 +// SHRL cl r32 +// SHRL 1 m32 +// SHRL imm8 m32 +// SHRL cl m32 +// SHRL imm8 r32 r32 +// SHRL cl r32 r32 +// SHRL imm8 r32 m32 +// SHRL cl r32 m32 +func SHRL(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsR32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR32(ops[1]) && operand.IsR32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR32(ops[1]) && operand.IsR32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR32(ops[1]) && operand.IsM32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR32(ops[1]) && operand.IsM32(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRL", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("SHRL: bad operands") +} + +// SHRQ: Logical Shift Right. 
+// +// Forms: +// +// SHRQ 1 r64 +// SHRQ imm8 r64 +// SHRQ cl r64 +// SHRQ 1 m64 +// SHRQ imm8 m64 +// SHRQ cl m64 +// SHRQ imm8 r64 r64 +// SHRQ cl r64 r64 +// SHRQ imm8 r64 m64 +// SHRQ cl r64 m64 +func SHRQ(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsR64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR64(ops[1]) && operand.IsR64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR64(ops[1]) && operand.IsR64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR64(ops[1]) && operand.IsM64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR64(ops[1]) && operand.IsM64(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRQ", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("SHRQ: bad operands") +} + +// SHRW: Logical Shift Right. 
+// +// Forms: +// +// SHRW 1 r16 +// SHRW imm8 r16 +// SHRW cl r16 +// SHRW 1 m16 +// SHRW imm8 m16 +// SHRW cl m16 +// SHRW imm8 r16 r16 +// SHRW cl r16 r16 +// SHRW imm8 r16 m16 +// SHRW cl r16 m16 +func SHRW(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsR16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.Is1(ops[0]) && operand.IsM16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsIMM8(ops[0]) && operand.IsM16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsCL(ops[0]) && operand.IsM16(ops[1]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR16(ops[1]) && operand.IsR16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR16(ops[1]) && operand.IsR16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsIMM8(ops[0]) && operand.IsR16(ops[1]) && operand.IsM16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + case len(ops) == 3 && operand.IsCL(ops[0]) && operand.IsR16(ops[1]) && operand.IsM16(ops[2]): + return &intrep.Instruction{ + Opcode: "SHRW", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1], ops[2]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("SHRW: bad operands") +} + +// SHRXL: Logical Shift Right Without Affecting Flags. +// +// Forms: +// +// SHRXL r32 r32 r32 +// SHRXL r32 m32 r32 +func SHRXL(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "SHRXL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr) && operand.IsR32(r1): + return &intrep.Instruction{ + Opcode: "SHRXL", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("SHRXL: bad operands") +} + +// SHRXQ: Logical Shift Right Without Affecting Flags. 
+// +// Forms: +// +// SHRXQ r64 r64 r64 +// SHRXQ r64 m64 r64 +func SHRXQ(r, mr, r1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "SHRXQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr) && operand.IsR64(r1): + return &intrep.Instruction{ + Opcode: "SHRXQ", + Operands: []operand.Op{r, mr, r1}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r1}, + }, nil + } + return nil, errors.New("SHRXQ: bad operands") +} + +// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SHUFPD imm8 xmm xmm +// SHUFPD imm8 m128 xmm +func SHUFPD(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHUFPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHUFPD", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHUFPD: bad operands") +} + +// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SHUFPS imm8 xmm xmm +// SHUFPS imm8 m128 xmm +func SHUFPS(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHUFPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SHUFPS", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SHUFPS: bad operands") +} + +// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// SQRTPD xmm xmm +// SQRTPD m128 xmm +func SQRTPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SQRTPD: bad operands") +} + +// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SQRTPS xmm xmm +// SQRTPS m128 xmm +func SQRTPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SQRTPS: bad operands") +} + +// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value. 
+// +// Forms: +// +// SQRTSD xmm xmm +// SQRTSD m64 xmm +func SQRTSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SQRTSD: bad operands") +} + +// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// SQRTSS xmm xmm +// SQRTSS m32 xmm +func SQRTSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SQRTSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SQRTSS: bad operands") +} + +// STC: Set Carry Flag. +// +// Forms: +// +// STC +func STC() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "STC", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// STD: Set Direction Flag. +// +// Forms: +// +// STD +func STD() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "STD", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// STMXCSR: Store MXCSR Register State. +// +// Forms: +// +// STMXCSR m32 +func STMXCSR(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM32(m): + return &intrep.Instruction{ + Opcode: "STMXCSR", + Operands: []operand.Op{m}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("STMXCSR: bad operands") +} + +// SUBB: Subtract. 
+// +// Forms: +// +// SUBB imm8 al +// SUBB imm8 r8 +// SUBB r8 r8 +// SUBB m8 r8 +// SUBB imm8 m8 +// SUBB r8 m8 +func SUBB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "SUBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "SUBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "SUBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "SUBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "SUBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "SUBB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("SUBB: bad operands") +} + +// SUBL: Subtract. +// +// Forms: +// +// SUBL imm32 eax +// SUBL imm8 r32 +// SUBL imm32 r32 +// SUBL r32 r32 +// SUBL m32 r32 +// SUBL imm8 m32 +// SUBL imm32 m32 +// SUBL r32 m32 +func SUBL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "SUBL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("SUBL: bad operands") +} + +// SUBPD: Subtract Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// SUBPD xmm xmm +// SUBPD m128 xmm +func SUBPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SUBPD: bad operands") +} + +// SUBPS: Subtract Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// SUBPS xmm xmm +// SUBPS m128 xmm +func SUBPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SUBPS: bad operands") +} + +// SUBQ: Subtract. +// +// Forms: +// +// SUBQ imm32 rax +// SUBQ imm8 r64 +// SUBQ imm32 r64 +// SUBQ r64 r64 +// SUBQ m64 r64 +// SUBQ imm8 m64 +// SUBQ imm32 m64 +// SUBQ r64 m64 +func SUBQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "SUBQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("SUBQ: bad operands") +} + +// SUBSD: Subtract Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// SUBSD xmm xmm +// SUBSD m64 xmm +func SUBSD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBSD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SUBSD: bad operands") +} + +// SUBSS: Subtract Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// SUBSS xmm xmm +// SUBSS m32 xmm +func SUBSS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "SUBSS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("SUBSS: bad operands") +} + +// SUBW: Subtract. +// +// Forms: +// +// SUBW imm16 ax +// SUBW imm8 r16 +// SUBW imm16 r16 +// SUBW r16 r16 +// SUBW m16 r16 +// SUBW imm8 m16 +// SUBW imm16 m16 +// SUBW r16 m16 +func SUBW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "SUBW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("SUBW: bad operands") +} + +// SYSCALL: Fast System Call. +// +// Forms: +// +// SYSCALL +func SYSCALL() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "SYSCALL", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{reg.R11, reg.RCX}, + }, nil +} + +// TESTB: Logical Compare. 
+// +// Forms: +// +// TESTB imm8 al +// TESTB imm8 r8 +// TESTB r8 r8 +// TESTB imm8 m8 +// TESTB r8 m8 +func TESTB(ir, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(ir) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "TESTB", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM8(ir) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "TESTB", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR8(ir) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "TESTB", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{ir, amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM8(ir) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "TESTB", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR8(ir) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "TESTB", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{ir, amr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("TESTB: bad operands") +} + +// TESTL: Logical Compare. +// +// Forms: +// +// TESTL imm32 eax +// TESTL imm32 r32 +// TESTL r32 r32 +// TESTL imm32 m32 +// TESTL r32 m32 +func TESTL(ir, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(ir) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "TESTL", + Operands: []operand.Op{ir, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM32(ir) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "TESTL", + Operands: []operand.Op{ir, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(ir) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "TESTL", + Operands: []operand.Op{ir, emr}, + Inputs: []operand.Op{ir, emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM32(ir) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "TESTL", + Operands: []operand.Op{ir, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR32(ir) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "TESTL", + Operands: []operand.Op{ir, emr}, + Inputs: []operand.Op{ir, emr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("TESTL: bad operands") +} + +// TESTQ: Logical Compare. 
+// +// Forms: +// +// TESTQ imm32 rax +// TESTQ imm32 r64 +// TESTQ r64 r64 +// TESTQ imm32 m64 +// TESTQ r64 m64 +func TESTQ(ir, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(ir) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "TESTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM32(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "TESTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(ir) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "TESTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM32(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "TESTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR64(ir) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "TESTQ", + Operands: []operand.Op{ir, mr}, + Inputs: []operand.Op{ir, mr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("TESTQ: bad operands") +} + +// TESTW: Logical Compare. +// +// Forms: +// +// TESTW imm16 ax +// TESTW imm16 r16 +// TESTW r16 r16 +// TESTW imm16 m16 +// TESTW r16 m16 +func TESTW(ir, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(ir) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "TESTW", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM16(ir) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "TESTW", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(ir) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "TESTW", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{ir, amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsIMM16(ir) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "TESTW", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{}, + }, nil + case operand.IsR16(ir) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "TESTW", + Operands: []operand.Op{ir, amr}, + Inputs: []operand.Op{ir, amr}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("TESTW: bad operands") +} + +// TZCNTL: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTL r32 r32 +// TZCNTL m32 r32 +func TZCNTL(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "TZCNTL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mr) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "TZCNTL", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("TZCNTL: bad operands") +} + +// TZCNTQ: Count the Number of Trailing Zero Bits. 
+// +// Forms: +// +// TZCNTQ r64 r64 +// TZCNTQ m64 r64 +func TZCNTQ(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "TZCNTQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mr) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "TZCNTQ", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("TZCNTQ: bad operands") +} + +// TZCNTW: Count the Number of Trailing Zero Bits. +// +// Forms: +// +// TZCNTW r16 r16 +// TZCNTW m16 r16 +func TZCNTW(mr, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "TZCNTW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM16(mr) && operand.IsR16(r): + return &intrep.Instruction{ + Opcode: "TZCNTW", + Operands: []operand.Op{mr, r}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("TZCNTW: bad operands") +} + +// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// UCOMISD xmm xmm +// UCOMISD m64 xmm +func UCOMISD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UCOMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UCOMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("UCOMISD: bad operands") +} + +// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// UCOMISS xmm xmm +// UCOMISS m32 xmm +func UCOMISS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UCOMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UCOMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("UCOMISS: bad operands") +} + +// UD2: Undefined Instruction. +// +// Forms: +// +// UD2 +func UD2() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "UD2", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// UNPCKHPD xmm xmm +// UNPCKHPD m128 xmm +func UNPCKHPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKHPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKHPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("UNPCKHPD: bad operands") +} + +// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKHPS xmm xmm +// UNPCKHPS m128 xmm +func UNPCKHPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKHPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKHPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("UNPCKHPS: bad operands") +} + +// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKLPD xmm xmm +// UNPCKLPD m128 xmm +func UNPCKLPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKLPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKLPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("UNPCKLPD: bad operands") +} + +// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// UNPCKLPS xmm xmm +// UNPCKLPS m128 xmm +func UNPCKLPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKLPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "UNPCKLPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("UNPCKLPS: bad operands") +} + +// VADDPD: Add Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VADDPD xmm xmm xmm +// VADDPD m128 xmm xmm +// VADDPD ymm ymm ymm +// VADDPD m256 ymm ymm +func VADDPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VADDPD: bad operands") +} + +// VADDPS: Add Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VADDPS xmm xmm xmm +// VADDPS m128 xmm xmm +// VADDPS ymm ymm ymm +// VADDPS m256 ymm ymm +func VADDPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VADDPS: bad operands") +} + +// VADDSD: Add Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VADDSD xmm xmm xmm +// VADDSD m64 xmm xmm +func VADDSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VADDSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VADDSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VADDSD: bad operands") +} + +// VADDSS: Add Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VADDSS xmm xmm xmm +// VADDSS m32 xmm xmm +func VADDSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VADDSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VADDSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VADDSS: bad operands") +} + +// VADDSUBPD: Packed Double-FP Add/Subtract. +// +// Forms: +// +// VADDSUBPD xmm xmm xmm +// VADDSUBPD m128 xmm xmm +// VADDSUBPD ymm ymm ymm +// VADDSUBPD m256 ymm ymm +func VADDSUBPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VADDSUBPD: bad operands") +} + +// VADDSUBPS: Packed Single-FP Add/Subtract. +// +// Forms: +// +// VADDSUBPS xmm xmm xmm +// VADDSUBPS m128 xmm xmm +// VADDSUBPS ymm ymm ymm +// VADDSUBPS m256 ymm ymm +func VADDSUBPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VADDSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VADDSUBPS: bad operands") +} + +// VAESDEC: Perform One Round of an AES Decryption Flow. 
+// +// Forms: +// +// VAESDEC xmm xmm xmm +// VAESDEC m128 xmm xmm +func VAESDEC(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESDEC", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESDEC", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VAESDEC: bad operands") +} + +// VAESDECLAST: Perform Last Round of an AES Decryption Flow. +// +// Forms: +// +// VAESDECLAST xmm xmm xmm +// VAESDECLAST m128 xmm xmm +func VAESDECLAST(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESDECLAST", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESDECLAST", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VAESDECLAST: bad operands") +} + +// VAESENC: Perform One Round of an AES Encryption Flow. +// +// Forms: +// +// VAESENC xmm xmm xmm +// VAESENC m128 xmm xmm +func VAESENC(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESENC", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESENC", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VAESENC: bad operands") +} + +// VAESENCLAST: Perform Last Round of an AES Encryption Flow. +// +// Forms: +// +// VAESENCLAST xmm xmm xmm +// VAESENCLAST m128 xmm xmm +func VAESENCLAST(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESENCLAST", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VAESENCLAST", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VAESENCLAST: bad operands") +} + +// VAESIMC: Perform the AES InvMixColumn Transformation. +// +// Forms: +// +// VAESIMC xmm xmm +// VAESIMC m128 xmm +func VAESIMC(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VAESIMC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VAESIMC", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VAESIMC: bad operands") +} + +// VAESKEYGENASSIST: AES Round Key Generation Assist. 
+// +// Forms: +// +// VAESKEYGENASSIST imm8 xmm xmm +// VAESKEYGENASSIST imm8 m128 xmm +func VAESKEYGENASSIST(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VAESKEYGENASSIST", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VAESKEYGENASSIST", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VAESKEYGENASSIST: bad operands") +} + +// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VANDNPD xmm xmm xmm +// VANDNPD m128 xmm xmm +// VANDNPD ymm ymm ymm +// VANDNPD m256 ymm ymm +func VANDNPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VANDNPD: bad operands") +} + +// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VANDNPS xmm xmm xmm +// VANDNPS m128 xmm xmm +// VANDNPS ymm ymm ymm +// VANDNPS m256 ymm ymm +func VANDNPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDNPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VANDNPS: bad operands") +} + +// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VANDPD xmm xmm xmm +// VANDPD m128 xmm xmm +// VANDPD ymm ymm ymm +// VANDPD m256 ymm ymm +func VANDPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VANDPD: bad operands") +} + +// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VANDPS xmm xmm xmm +// VANDPS m128 xmm xmm +// VANDPS ymm ymm ymm +// VANDPS m256 ymm ymm +func VANDPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VANDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VANDPS: bad operands") +} + +// VBLENDPD: Blend Packed Double Precision Floating-Point Values. 
+// +// Forms: +// +// VBLENDPD imm8 xmm xmm xmm +// VBLENDPD imm8 m128 xmm xmm +// VBLENDPD imm8 ymm ymm ymm +// VBLENDPD imm8 m256 ymm ymm +func VBLENDPD(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VBLENDPD: bad operands") +} + +// VBLENDPS: Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VBLENDPS imm8 xmm xmm xmm +// VBLENDPS imm8 m128 xmm xmm +// VBLENDPS imm8 ymm ymm ymm +// VBLENDPS imm8 m256 ymm ymm +func VBLENDPS(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VBLENDPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VBLENDPS: bad operands") +} + +// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values. 
+// +// Forms: +// +// VBLENDVPD xmm xmm xmm xmm +// VBLENDVPD xmm m128 xmm xmm +// VBLENDVPD ymm ymm ymm ymm +// VBLENDVPD ymm m256 ymm ymm +func VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsXMM(mxy) && operand.IsXMM(xy1) && operand.IsXMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPD", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsXMM(xy) && operand.IsM128(mxy) && operand.IsXMM(xy1) && operand.IsXMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPD", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsYMM(xy) && operand.IsYMM(mxy) && operand.IsYMM(xy1) && operand.IsYMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPD", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsYMM(xy) && operand.IsM256(mxy) && operand.IsYMM(xy1) && operand.IsYMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPD", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + } + return nil, errors.New("VBLENDVPD: bad operands") +} + +// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VBLENDVPS xmm xmm xmm xmm +// VBLENDVPS xmm m128 xmm xmm +// VBLENDVPS ymm ymm ymm ymm +// VBLENDVPS ymm m256 ymm ymm +func VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsXMM(mxy) && operand.IsXMM(xy1) && operand.IsXMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPS", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsXMM(xy) && operand.IsM128(mxy) && operand.IsXMM(xy1) && operand.IsXMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPS", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsYMM(xy) && operand.IsYMM(mxy) && operand.IsYMM(xy1) && operand.IsYMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPS", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsYMM(xy) && operand.IsM256(mxy) && operand.IsYMM(xy1) && operand.IsYMM(xy2): + return &intrep.Instruction{ + Opcode: "VBLENDVPS", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + } + return nil, errors.New("VBLENDVPS: bad operands") +} + +// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data. +// +// Forms: +// +// VBROADCASTF128 m128 ymm +func VBROADCASTF128(m, y operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VBROADCASTF128", + Operands: []operand.Op{m, y}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{y}, + }, nil + } + return nil, errors.New("VBROADCASTF128: bad operands") +} + +// VBROADCASTI128: Broadcast 128 Bits of Integer Data. 
+// +// Forms: +// +// VBROADCASTI128 m128 ymm +func VBROADCASTI128(m, y operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VBROADCASTI128", + Operands: []operand.Op{m, y}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{y}, + }, nil + } + return nil, errors.New("VBROADCASTI128: bad operands") +} + +// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element. +// +// Forms: +// +// VBROADCASTSD xmm ymm +// VBROADCASTSD m64 ymm +func VBROADCASTSD(mx, y operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VBROADCASTSD", + Operands: []operand.Op{mx, y}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{y}, + }, nil + case operand.IsM64(mx) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VBROADCASTSD", + Operands: []operand.Op{mx, y}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{y}, + }, nil + } + return nil, errors.New("VBROADCASTSD: bad operands") +} + +// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element. +// +// Forms: +// +// VBROADCASTSS xmm xmm +// VBROADCASTSS m32 xmm +// VBROADCASTSS xmm ymm +// VBROADCASTSS m32 ymm +func VBROADCASTSS(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VBROADCASTSS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VBROADCASTSS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VBROADCASTSS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VBROADCASTSS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VBROADCASTSS: bad operands") +} + +// VCMPPD: Compare Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VCMPPD imm8 xmm xmm xmm +// VCMPPD imm8 m128 xmm xmm +// VCMPPD imm8 ymm ymm ymm +// VCMPPD imm8 m256 ymm ymm +func VCMPPD(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VCMPPD: bad operands") +} + +// VCMPPS: Compare Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VCMPPS imm8 xmm xmm xmm +// VCMPPS imm8 m128 xmm xmm +// VCMPPS imm8 ymm ymm ymm +// VCMPPS imm8 m256 ymm ymm +func VCMPPS(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VCMPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VCMPPS: bad operands") +} + +// VCMPSD: Compare Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VCMPSD imm8 xmm xmm xmm +// VCMPSD imm8 m64 xmm xmm +func VCMPSD(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCMPSD", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCMPSD", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCMPSD: bad operands") +} + +// VCMPSS: Compare Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VCMPSS imm8 xmm xmm xmm +// VCMPSS imm8 m32 xmm xmm +func VCMPSS(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCMPSS", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCMPSS", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCMPSS: bad operands") +} + +// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VCOMISD xmm xmm +// VCOMISD m64 xmm +func VCOMISD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCOMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCOMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VCOMISD: bad operands") +} + +// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VCOMISS xmm xmm +// VCOMISS m32 xmm +func VCOMISS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCOMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCOMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VCOMISS: bad operands") +} + +// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values. +// +// Forms: +// +// VCVTDQ2PD xmm xmm +// VCVTDQ2PD m64 xmm +// VCVTDQ2PD xmm ymm +// VCVTDQ2PD m128 ymm +func VCVTDQ2PD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VCVTDQ2PD: bad operands") +} + +// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values. 
+// +// Forms: +// +// VCVTDQ2PS xmm xmm +// VCVTDQ2PS m128 xmm +// VCVTDQ2PS ymm ymm +// VCVTDQ2PS m256 ymm +func VCVTDQ2PS(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTDQ2PS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VCVTDQ2PS: bad operands") +} + +// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPD2DQX xmm xmm +// VCVTPD2DQX m128 xmm +func VCVTPD2DQX(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2DQX", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2DQX", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VCVTPD2DQX: bad operands") +} + +// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPD2DQY ymm xmm +// VCVTPD2DQY m256 xmm +func VCVTPD2DQY(my, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsYMM(my) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2DQY", + Operands: []operand.Op{my, x}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM256(my) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2DQY", + Operands: []operand.Op{my, x}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VCVTPD2DQY: bad operands") +} + +// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. +// +// Forms: +// +// VCVTPD2PSX xmm xmm +// VCVTPD2PSX m128 xmm +func VCVTPD2PSX(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2PSX", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2PSX", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VCVTPD2PSX: bad operands") +} + +// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values. 
+// +// Forms: +// +// VCVTPD2PSY ymm xmm +// VCVTPD2PSY m256 xmm +func VCVTPD2PSY(my, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsYMM(my) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2PSY", + Operands: []operand.Op{my, x}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM256(my) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTPD2PSY", + Operands: []operand.Op{my, x}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VCVTPD2PSY: bad operands") +} + +// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values. +// +// Forms: +// +// VCVTPH2PS xmm xmm +// VCVTPH2PS m64 xmm +// VCVTPH2PS xmm ymm +// VCVTPH2PS m128 ymm +func VCVTPH2PS(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPH2PS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPH2PS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPH2PS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPH2PS", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VCVTPH2PS: bad operands") +} + +// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTPS2DQ xmm xmm +// VCVTPS2DQ m128 xmm +// VCVTPS2DQ ymm ymm +// VCVTPS2DQ m256 ymm +func VCVTPS2DQ(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VCVTPS2DQ: bad operands") +} + +// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values. 
+// +// Forms: +// +// VCVTPS2PD xmm xmm +// VCVTPS2PD m64 xmm +// VCVTPS2PD xmm ymm +// VCVTPS2PD m128 ymm +func VCVTPS2PD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTPS2PD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VCVTPS2PD: bad operands") +} + +// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value. +// +// Forms: +// +// VCVTPS2PH imm8 xmm xmm +// VCVTPS2PH imm8 ymm xmm +// VCVTPS2PH imm8 xmm m64 +// VCVTPS2PH imm8 ymm m128 +func VCVTPS2PH(i, xy, mx operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(xy) && operand.IsXMM(mx): + return &intrep.Instruction{ + Opcode: "VCVTPS2PH", + Operands: []operand.Op{i, xy, mx}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{mx}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(xy) && operand.IsXMM(mx): + return &intrep.Instruction{ + Opcode: "VCVTPS2PH", + Operands: []operand.Op{i, xy, mx}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{mx}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(xy) && operand.IsM64(mx): + return &intrep.Instruction{ + Opcode: "VCVTPS2PH", + Operands: []operand.Op{i, xy, mx}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{mx}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(xy) && operand.IsM128(mx): + return &intrep.Instruction{ + Opcode: "VCVTPS2PH", + Operands: []operand.Op{i, xy, mx}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{mx}, + }, nil + } + return nil, errors.New("VCVTPS2PH: bad operands") +} + +// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer. +// +// Forms: +// +// VCVTSD2SI xmm r32 +// VCVTSD2SI m64 r32 +func VCVTSD2SI(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTSD2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTSD2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTSD2SI: bad operands") +} + +// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer. 
+// +// Forms: +// +// VCVTSD2SIQ xmm r64 +// VCVTSD2SIQ m64 r64 +func VCVTSD2SIQ(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTSD2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTSD2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTSD2SIQ: bad operands") +} + +// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSD2SS xmm xmm xmm +// VCVTSD2SS m64 xmm xmm +func VCVTSD2SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSD2SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSD2SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCVTSD2SS: bad operands") +} + +// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSI2SDL r32 xmm xmm +// VCVTSI2SDL m32 xmm xmm +func VCVTSI2SDL(mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SDL", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SDL", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCVTSI2SDL: bad operands") +} + +// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSI2SDQ r64 xmm xmm +// VCVTSI2SDQ m64 xmm xmm +func VCVTSI2SDQ(mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SDQ", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SDQ", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCVTSI2SDQ: bad operands") +} + +// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value. 
+// +// Forms: +// +// VCVTSI2SSL r32 xmm xmm +// VCVTSI2SSL m32 xmm xmm +func VCVTSI2SSL(mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SSL", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SSL", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCVTSI2SSL: bad operands") +} + +// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value. +// +// Forms: +// +// VCVTSI2SSQ r64 xmm xmm +// VCVTSI2SSQ m64 xmm xmm +func VCVTSI2SSQ(mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SSQ", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSI2SSQ", + Operands: []operand.Op{mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCVTSI2SSQ: bad operands") +} + +// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value. +// +// Forms: +// +// VCVTSS2SD xmm xmm xmm +// VCVTSS2SD m32 xmm xmm +func VCVTSS2SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSS2SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VCVTSS2SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VCVTSS2SD: bad operands") +} + +// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTSS2SI xmm r32 +// VCVTSS2SI m32 r32 +func VCVTSS2SI(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTSS2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTSS2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTSS2SI: bad operands") +} + +// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer. 
+// +// Forms: +// +// VCVTSS2SIQ xmm r64 +// VCVTSS2SIQ m32 r64 +func VCVTSS2SIQ(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTSS2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTSS2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTSS2SIQ: bad operands") +} + +// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPD2DQX xmm xmm +// VCVTTPD2DQX m128 xmm +func VCVTTPD2DQX(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTTPD2DQX", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTTPD2DQX", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VCVTTPD2DQX: bad operands") +} + +// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPD2DQY ymm xmm +// VCVTTPD2DQY m256 xmm +func VCVTTPD2DQY(my, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsYMM(my) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTTPD2DQY", + Operands: []operand.Op{my, x}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM256(my) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VCVTTPD2DQY", + Operands: []operand.Op{my, x}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VCVTTPD2DQY: bad operands") +} + +// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers. +// +// Forms: +// +// VCVTTPS2DQ xmm xmm +// VCVTTPS2DQ m128 xmm +// VCVTTPS2DQ ymm ymm +// VCVTTPS2DQ m256 ymm +func VCVTTPS2DQ(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VCVTTPS2DQ", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VCVTTPS2DQ: bad operands") +} + +// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. 
+// +// Forms: +// +// VCVTTSD2SI xmm r32 +// VCVTTSD2SI m64 r32 +func VCVTTSD2SI(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTTSD2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTTSD2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTTSD2SI: bad operands") +} + +// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer. +// +// Forms: +// +// VCVTTSD2SIQ xmm r64 +// VCVTTSD2SIQ m64 r64 +func VCVTTSD2SIQ(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTTSD2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM64(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTTSD2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTTSD2SIQ: bad operands") +} + +// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTTSS2SI xmm r32 +// VCVTTSS2SI m32 r32 +func VCVTTSS2SI(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTTSS2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VCVTTSS2SI", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTTSS2SI: bad operands") +} + +// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer. +// +// Forms: +// +// VCVTTSS2SIQ xmm r64 +// VCVTTSS2SIQ m32 r64 +func VCVTTSS2SIQ(mx, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTTSS2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsM32(mx) && operand.IsR64(r): + return &intrep.Instruction{ + Opcode: "VCVTTSS2SIQ", + Operands: []operand.Op{mx, r}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VCVTTSS2SIQ: bad operands") +} + +// VDIVPD: Divide Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VDIVPD xmm xmm xmm +// VDIVPD m128 xmm xmm +// VDIVPD ymm ymm ymm +// VDIVPD m256 ymm ymm +func VDIVPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VDIVPD: bad operands") +} + +// VDIVPS: Divide Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VDIVPS xmm xmm xmm +// VDIVPS m128 xmm xmm +// VDIVPS ymm ymm ymm +// VDIVPS m256 ymm ymm +func VDIVPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VDIVPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VDIVPS: bad operands") +} + +// VDIVSD: Divide Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VDIVSD xmm xmm xmm +// VDIVSD m64 xmm xmm +func VDIVSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VDIVSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VDIVSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VDIVSD: bad operands") +} + +// VDIVSS: Divide Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VDIVSS xmm xmm xmm +// VDIVSS m32 xmm xmm +func VDIVSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VDIVSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VDIVSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VDIVSS: bad operands") +} + +// VDPPD: Dot Product of Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VDPPD imm8 xmm xmm xmm +// VDPPD imm8 m128 xmm xmm +func VDPPD(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VDPPD", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VDPPD", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VDPPD: bad operands") +} + +// VDPPS: Dot Product of Packed Single Precision Floating-Point Values. +// +// Forms: +// +// VDPPS imm8 xmm xmm xmm +// VDPPS imm8 m128 xmm xmm +// VDPPS imm8 ymm ymm ymm +// VDPPS imm8 m256 ymm ymm +func VDPPS(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VDPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VDPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VDPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VDPPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VDPPS: bad operands") +} + +// VEXTRACTF128: Extract Packed Floating-Point Values. 
+// +// Forms: +// +// VEXTRACTF128 imm8 ymm xmm +// VEXTRACTF128 imm8 ymm m128 +func VEXTRACTF128(i, y, mx operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsYMM(y) && operand.IsXMM(mx): + return &intrep.Instruction{ + Opcode: "VEXTRACTF128", + Operands: []operand.Op{i, y, mx}, + Inputs: []operand.Op{y}, + Outputs: []operand.Op{mx}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(y) && operand.IsM128(mx): + return &intrep.Instruction{ + Opcode: "VEXTRACTF128", + Operands: []operand.Op{i, y, mx}, + Inputs: []operand.Op{y}, + Outputs: []operand.Op{mx}, + }, nil + } + return nil, errors.New("VEXTRACTF128: bad operands") +} + +// VEXTRACTI128: Extract Packed Integer Values. +// +// Forms: +// +// VEXTRACTI128 imm8 ymm xmm +// VEXTRACTI128 imm8 ymm m128 +func VEXTRACTI128(i, y, mx operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsYMM(y) && operand.IsXMM(mx): + return &intrep.Instruction{ + Opcode: "VEXTRACTI128", + Operands: []operand.Op{i, y, mx}, + Inputs: []operand.Op{y}, + Outputs: []operand.Op{mx}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(y) && operand.IsM128(mx): + return &intrep.Instruction{ + Opcode: "VEXTRACTI128", + Operands: []operand.Op{i, y, mx}, + Inputs: []operand.Op{y}, + Outputs: []operand.Op{mx}, + }, nil + } + return nil, errors.New("VEXTRACTI128: bad operands") +} + +// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value. +// +// Forms: +// +// VEXTRACTPS imm8 xmm r32 +// VEXTRACTPS imm8 xmm m32 +func VEXTRACTPS(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "VEXTRACTPS", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "VEXTRACTPS", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("VEXTRACTPS: bad operands") +} + +// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132PD xmm xmm xmm +// VFMADD132PD m128 xmm xmm +// VFMADD132PD ymm ymm ymm +// VFMADD132PD m256 ymm ymm +func VFMADD132PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADD132PD: bad operands") +} + +// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADD132PS xmm xmm xmm +// VFMADD132PS m128 xmm xmm +// VFMADD132PS ymm ymm ymm +// VFMADD132PS m256 ymm ymm +func VFMADD132PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADD132PS: bad operands") +} + +// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132SD xmm xmm xmm +// VFMADD132SD m64 xmm xmm +func VFMADD132SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMADD132SD: bad operands") +} + +// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD132SS xmm xmm xmm +// VFMADD132SS m32 xmm xmm +func VFMADD132SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMADD132SS: bad operands") +} + +// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADD213PD xmm xmm xmm +// VFMADD213PD m128 xmm xmm +// VFMADD213PD ymm ymm ymm +// VFMADD213PD m256 ymm ymm +func VFMADD213PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADD213PD: bad operands") +} + +// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213PS xmm xmm xmm +// VFMADD213PS m128 xmm xmm +// VFMADD213PS ymm ymm ymm +// VFMADD213PS m256 ymm ymm +func VFMADD213PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADD213PS: bad operands") +} + +// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD213SD xmm xmm xmm +// VFMADD213SD m64 xmm xmm +func VFMADD213SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMADD213SD: bad operands") +} + +// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADD213SS xmm xmm xmm +// VFMADD213SS m32 xmm xmm +func VFMADD213SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMADD213SS: bad operands") +} + +// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231PD xmm xmm xmm +// VFMADD231PD m128 xmm xmm +// VFMADD231PD ymm ymm ymm +// VFMADD231PD m256 ymm ymm +func VFMADD231PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADD231PD: bad operands") +} + +// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231PS xmm xmm xmm +// VFMADD231PS m128 xmm xmm +// VFMADD231PS ymm ymm ymm +// VFMADD231PS m256 ymm ymm +func VFMADD231PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADD231PS: bad operands") +} + +// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADD231SD xmm xmm xmm +// VFMADD231SD m64 xmm xmm +func VFMADD231SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMADD231SD: bad operands") +} + +// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMADD231SS xmm xmm xmm +// VFMADD231SS m32 xmm xmm +func VFMADD231SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMADD231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMADD231SS: bad operands") +} + +// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB132PD xmm xmm xmm +// VFMADDSUB132PD m128 xmm xmm +// VFMADDSUB132PD ymm ymm ymm +// VFMADDSUB132PD m256 ymm ymm +func VFMADDSUB132PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADDSUB132PD: bad operands") +} + +// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADDSUB132PS xmm xmm xmm +// VFMADDSUB132PS m128 xmm xmm +// VFMADDSUB132PS ymm ymm ymm +// VFMADDSUB132PS m256 ymm ymm +func VFMADDSUB132PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADDSUB132PS: bad operands") +} + +// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB213PD xmm xmm xmm +// VFMADDSUB213PD m128 xmm xmm +// VFMADDSUB213PD ymm ymm ymm +// VFMADDSUB213PD m256 ymm ymm +func VFMADDSUB213PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADDSUB213PD: bad operands") +} + +// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADDSUB213PS xmm xmm xmm +// VFMADDSUB213PS m128 xmm xmm +// VFMADDSUB213PS ymm ymm ymm +// VFMADDSUB213PS m256 ymm ymm +func VFMADDSUB213PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADDSUB213PS: bad operands") +} + +// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMADDSUB231PD xmm xmm xmm +// VFMADDSUB231PD m128 xmm xmm +// VFMADDSUB231PD ymm ymm ymm +// VFMADDSUB231PD m256 ymm ymm +func VFMADDSUB231PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADDSUB231PD: bad operands") +} + +// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMADDSUB231PS xmm xmm xmm +// VFMADDSUB231PS m128 xmm xmm +// VFMADDSUB231PS ymm ymm ymm +// VFMADDSUB231PS m256 ymm ymm +func VFMADDSUB231PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMADDSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMADDSUB231PS: bad operands") +} + +// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132PD xmm xmm xmm +// VFMSUB132PD m128 xmm xmm +// VFMSUB132PD ymm ymm ymm +// VFMSUB132PD m256 ymm ymm +func VFMSUB132PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUB132PD: bad operands") +} + +// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUB132PS xmm xmm xmm +// VFMSUB132PS m128 xmm xmm +// VFMSUB132PS ymm ymm ymm +// VFMSUB132PS m256 ymm ymm +func VFMSUB132PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUB132PS: bad operands") +} + +// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132SD xmm xmm xmm +// VFMSUB132SD m64 xmm xmm +func VFMSUB132SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMSUB132SD: bad operands") +} + +// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB132SS xmm xmm xmm +// VFMSUB132SS m32 xmm xmm +func VFMSUB132SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMSUB132SS: bad operands") +} + +// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUB213PD xmm xmm xmm +// VFMSUB213PD m128 xmm xmm +// VFMSUB213PD ymm ymm ymm +// VFMSUB213PD m256 ymm ymm +func VFMSUB213PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUB213PD: bad operands") +} + +// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213PS xmm xmm xmm +// VFMSUB213PS m128 xmm xmm +// VFMSUB213PS ymm ymm ymm +// VFMSUB213PS m256 ymm ymm +func VFMSUB213PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUB213PS: bad operands") +} + +// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB213SD xmm xmm xmm +// VFMSUB213SD m64 xmm xmm +func VFMSUB213SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMSUB213SD: bad operands") +} + +// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUB213SS xmm xmm xmm +// VFMSUB213SS m32 xmm xmm +func VFMSUB213SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMSUB213SS: bad operands") +} + +// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231PD xmm xmm xmm +// VFMSUB231PD m128 xmm xmm +// VFMSUB231PD ymm ymm ymm +// VFMSUB231PD m256 ymm ymm +func VFMSUB231PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUB231PD: bad operands") +} + +// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231PS xmm xmm xmm +// VFMSUB231PS m128 xmm xmm +// VFMSUB231PS ymm ymm ymm +// VFMSUB231PS m256 ymm ymm +func VFMSUB231PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUB231PS: bad operands") +} + +// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUB231SD xmm xmm xmm +// VFMSUB231SD m64 xmm xmm +func VFMSUB231SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMSUB231SD: bad operands") +} + +// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUB231SS xmm xmm xmm +// VFMSUB231SS m32 xmm xmm +func VFMSUB231SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFMSUB231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFMSUB231SS: bad operands") +} + +// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD132PD xmm xmm xmm +// VFMSUBADD132PD m128 xmm xmm +// VFMSUBADD132PD ymm ymm ymm +// VFMSUBADD132PD m256 ymm ymm +func VFMSUBADD132PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUBADD132PD: bad operands") +} + +// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUBADD132PS xmm xmm xmm +// VFMSUBADD132PS m128 xmm xmm +// VFMSUBADD132PS ymm ymm ymm +// VFMSUBADD132PS m256 ymm ymm +func VFMSUBADD132PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUBADD132PS: bad operands") +} + +// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD213PD xmm xmm xmm +// VFMSUBADD213PD m128 xmm xmm +// VFMSUBADD213PD ymm ymm ymm +// VFMSUBADD213PD m256 ymm ymm +func VFMSUBADD213PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUBADD213PD: bad operands") +} + +// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUBADD213PS xmm xmm xmm +// VFMSUBADD213PS m128 xmm xmm +// VFMSUBADD213PS ymm ymm ymm +// VFMSUBADD213PS m256 ymm ymm +func VFMSUBADD213PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUBADD213PS: bad operands") +} + +// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFMSUBADD231PD xmm xmm xmm +// VFMSUBADD231PD m128 xmm xmm +// VFMSUBADD231PD ymm ymm ymm +// VFMSUBADD231PD m256 ymm ymm +func VFMSUBADD231PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUBADD231PD: bad operands") +} + +// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFMSUBADD231PS xmm xmm xmm +// VFMSUBADD231PS m128 xmm xmm +// VFMSUBADD231PS ymm ymm ymm +// VFMSUBADD231PS m256 ymm ymm +func VFMSUBADD231PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFMSUBADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFMSUBADD231PS: bad operands") +} + +// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132PD xmm xmm xmm +// VFNMADD132PD m128 xmm xmm +// VFNMADD132PD ymm ymm ymm +// VFNMADD132PD m256 ymm ymm +func VFNMADD132PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMADD132PD: bad operands") +} + +// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. 
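+//
+// In the VFNMADD* family the negation applies to the product, not the addend:
+// the result is -(a*b) + c, still computed with a single rounding (the
+// VFNMSUB* forms below compute -(a*b) - c).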
+// +// Forms: +// +// VFNMADD132PS xmm xmm xmm +// VFNMADD132PS m128 xmm xmm +// VFNMADD132PS ymm ymm ymm +// VFNMADD132PS m256 ymm ymm +func VFNMADD132PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMADD132PS: bad operands") +} + +// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132SD xmm xmm xmm +// VFNMADD132SD m64 xmm xmm +func VFNMADD132SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMADD132SD: bad operands") +} + +// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD132SS xmm xmm xmm +// VFNMADD132SS m32 xmm xmm +func VFNMADD132SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMADD132SS: bad operands") +} + +// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMADD213PD xmm xmm xmm +// VFNMADD213PD m128 xmm xmm +// VFNMADD213PD ymm ymm ymm +// VFNMADD213PD m256 ymm ymm +func VFNMADD213PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMADD213PD: bad operands") +} + +// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213PS xmm xmm xmm +// VFNMADD213PS m128 xmm xmm +// VFNMADD213PS ymm ymm ymm +// VFNMADD213PS m256 ymm ymm +func VFNMADD213PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMADD213PS: bad operands") +} + +// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD213SD xmm xmm xmm +// VFNMADD213SD m64 xmm xmm +func VFNMADD213SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMADD213SD: bad operands") +} + +// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMADD213SS xmm xmm xmm +// VFNMADD213SS m32 xmm xmm +func VFNMADD213SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMADD213SS: bad operands") +} + +// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231PD xmm xmm xmm +// VFNMADD231PD m128 xmm xmm +// VFNMADD231PD ymm ymm ymm +// VFNMADD231PD m256 ymm ymm +func VFNMADD231PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMADD231PD: bad operands") +} + +// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231PS xmm xmm xmm +// VFNMADD231PS m128 xmm xmm +// VFNMADD231PS ymm ymm ymm +// VFNMADD231PS m256 ymm ymm +func VFNMADD231PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMADD231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMADD231PS: bad operands") +} + +// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMADD231SD xmm xmm xmm +// VFNMADD231SD m64 xmm xmm +func VFNMADD231SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMADD231SD: bad operands") +} + +// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMADD231SS xmm xmm xmm +// VFNMADD231SS m32 xmm xmm +func VFNMADD231SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMADD231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMADD231SS: bad operands") +} + +// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132PD xmm xmm xmm +// VFNMSUB132PD m128 xmm xmm +// VFNMSUB132PD ymm ymm ymm +// VFNMSUB132PD m256 ymm ymm +func VFNMSUB132PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMSUB132PD: bad operands") +} + +// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB132PS xmm xmm xmm +// VFNMSUB132PS m128 xmm xmm +// VFNMSUB132PS ymm ymm ymm +// VFNMSUB132PS m256 ymm ymm +func VFNMSUB132PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMSUB132PS: bad operands") +} + +// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132SD xmm xmm xmm +// VFNMSUB132SD m64 xmm xmm +func VFNMSUB132SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMSUB132SD: bad operands") +} + +// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB132SS xmm xmm xmm +// VFNMSUB132SS m32 xmm xmm +func VFNMSUB132SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB132SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMSUB132SS: bad operands") +} + +// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB213PD xmm xmm xmm +// VFNMSUB213PD m128 xmm xmm +// VFNMSUB213PD ymm ymm ymm +// VFNMSUB213PD m256 ymm ymm +func VFNMSUB213PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMSUB213PD: bad operands") +} + +// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213PS xmm xmm xmm +// VFNMSUB213PS m128 xmm xmm +// VFNMSUB213PS ymm ymm ymm +// VFNMSUB213PS m256 ymm ymm +func VFNMSUB213PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMSUB213PS: bad operands") +} + +// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB213SD xmm xmm xmm +// VFNMSUB213SD m64 xmm xmm +func VFNMSUB213SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMSUB213SD: bad operands") +} + +// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB213SS xmm xmm xmm +// VFNMSUB213SS m32 xmm xmm +func VFNMSUB213SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB213SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMSUB213SS: bad operands") +} + +// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231PD xmm xmm xmm +// VFNMSUB231PD m128 xmm xmm +// VFNMSUB231PD ymm ymm ymm +// VFNMSUB231PD m256 ymm ymm +func VFNMSUB231PD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMSUB231PD: bad operands") +} + +// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231PS xmm xmm xmm +// VFNMSUB231PS m128 xmm xmm +// VFNMSUB231PS ymm ymm ymm +// VFNMSUB231PS m256 ymm ymm +func VFNMSUB231PS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231PS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy, xy1}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VFNMSUB231PS: bad operands") +} + +// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VFNMSUB231SD xmm xmm xmm +// VFNMSUB231SD m64 xmm xmm +func VFNMSUB231SD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231SD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMSUB231SD: bad operands") +} + +// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VFNMSUB231SS xmm xmm xmm +// VFNMSUB231SS m32 xmm xmm +func VFNMSUB231SS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VFNMSUB231SS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x, x1}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VFNMSUB231SS: bad operands") +} + +// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices. +// +// Forms: +// +// VGATHERDPD xmm vm32x xmm +// VGATHERDPD ymm vm32x ymm +func VGATHERDPD(xy, v, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsVM32X(v) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VGATHERDPD", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + case operand.IsYMM(xy) && operand.IsVM32X(v) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VGATHERDPD", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + } + return nil, errors.New("VGATHERDPD: bad operands") +} + +// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices. +// +// Forms: +// +// VGATHERDPS xmm vm32x xmm +// VGATHERDPS ymm vm32y ymm +func VGATHERDPS(xy, v, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsVM32X(v) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VGATHERDPS", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + case operand.IsYMM(xy) && operand.IsVM32Y(v) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VGATHERDPS", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + } + return nil, errors.New("VGATHERDPS: bad operands") +} + +// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices. 
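+//
+// As with the other VGATHER* constructors, the first (mask) register appears
+// in both Inputs and Outputs: the gather reads the mask to select elements and
+// clears it as elements complete, and the destination is also an input because
+// elements whose mask bit is clear keep their previous contents.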
+// +// Forms: +// +// VGATHERQPD xmm vm64x xmm +// VGATHERQPD ymm vm64y ymm +func VGATHERQPD(xy, v, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsVM64X(v) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VGATHERQPD", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + case operand.IsYMM(xy) && operand.IsVM64Y(v) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VGATHERQPD", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + } + return nil, errors.New("VGATHERQPD: bad operands") +} + +// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices. +// +// Forms: +// +// VGATHERQPS xmm vm64x xmm +// VGATHERQPS xmm vm64y xmm +func VGATHERQPS(x, v, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsVM64X(v) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VGATHERQPS", + Operands: []operand.Op{x, v, x1}, + Inputs: []operand.Op{x, v, x1}, + Outputs: []operand.Op{x, x1}, + }, nil + case operand.IsXMM(x) && operand.IsVM64Y(v) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VGATHERQPS", + Operands: []operand.Op{x, v, x1}, + Inputs: []operand.Op{x, v, x1}, + Outputs: []operand.Op{x, x1}, + }, nil + } + return nil, errors.New("VGATHERQPS: bad operands") +} + +// VHADDPD: Packed Double-FP Horizontal Add. +// +// Forms: +// +// VHADDPD xmm xmm xmm +// VHADDPD m128 xmm xmm +// VHADDPD ymm ymm ymm +// VHADDPD m256 ymm ymm +func VHADDPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VHADDPD: bad operands") +} + +// VHADDPS: Packed Single-FP Horizontal Add. 
+// +// Forms: +// +// VHADDPS xmm xmm xmm +// VHADDPS m128 xmm xmm +// VHADDPS ymm ymm ymm +// VHADDPS m256 ymm ymm +func VHADDPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHADDPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VHADDPS: bad operands") +} + +// VHSUBPD: Packed Double-FP Horizontal Subtract. +// +// Forms: +// +// VHSUBPD xmm xmm xmm +// VHSUBPD m128 xmm xmm +// VHSUBPD ymm ymm ymm +// VHSUBPD m256 ymm ymm +func VHSUBPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VHSUBPD: bad operands") +} + +// VHSUBPS: Packed Single-FP Horizontal Subtract. 
+// +// Forms: +// +// VHSUBPS xmm xmm xmm +// VHSUBPS m128 xmm xmm +// VHSUBPS ymm ymm ymm +// VHSUBPS m256 ymm ymm +func VHSUBPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VHSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VHSUBPS: bad operands") +} + +// VINSERTF128: Insert Packed Floating-Point Values. +// +// Forms: +// +// VINSERTF128 imm8 xmm ymm ymm +// VINSERTF128 imm8 m128 ymm ymm +func VINSERTF128(i, mx, y, y1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VINSERTF128", + Operands: []operand.Op{i, mx, y, y1}, + Inputs: []operand.Op{mx, y}, + Outputs: []operand.Op{y1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VINSERTF128", + Operands: []operand.Op{i, mx, y, y1}, + Inputs: []operand.Op{mx, y}, + Outputs: []operand.Op{y1}, + }, nil + } + return nil, errors.New("VINSERTF128: bad operands") +} + +// VINSERTI128: Insert Packed Integer Values. +// +// Forms: +// +// VINSERTI128 imm8 xmm ymm ymm +// VINSERTI128 imm8 m128 ymm ymm +func VINSERTI128(i, mx, y, y1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VINSERTI128", + Operands: []operand.Op{i, mx, y, y1}, + Inputs: []operand.Op{mx, y}, + Outputs: []operand.Op{y1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VINSERTI128", + Operands: []operand.Op{i, mx, y, y1}, + Inputs: []operand.Op{mx, y}, + Outputs: []operand.Op{y1}, + }, nil + } + return nil, errors.New("VINSERTI128: bad operands") +} + +// VINSERTPS: Insert Packed Single Precision Floating-Point Value. 
+// +// Forms: +// +// VINSERTPS imm8 xmm xmm xmm +// VINSERTPS imm8 m32 xmm xmm +func VINSERTPS(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VINSERTPS", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VINSERTPS", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VINSERTPS: bad operands") +} + +// VLDDQU: Load Unaligned Integer 128 Bits. +// +// Forms: +// +// VLDDQU m128 xmm +// VLDDQU m256 ymm +func VLDDQU(m, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VLDDQU", + Operands: []operand.Op{m, xy}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(m) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VLDDQU", + Operands: []operand.Op{m, xy}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VLDDQU: bad operands") +} + +// VLDMXCSR: Load MXCSR Register. +// +// Forms: +// +// VLDMXCSR m32 +func VLDMXCSR(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM32(m): + return &intrep.Instruction{ + Opcode: "VLDMXCSR", + Operands: []operand.Op{m}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VLDMXCSR: bad operands") +} + +// VMASKMOVDQU: Store Selected Bytes of Double Quadword. +// +// Forms: +// +// VMASKMOVDQU xmm xmm +func VMASKMOVDQU(x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMASKMOVDQU", + Operands: []operand.Op{x, x1}, + Inputs: []operand.Op{x, x1, reg.RDI}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VMASKMOVDQU: bad operands") +} + +// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VMASKMOVPD m128 xmm xmm +// VMASKMOVPD m256 ymm ymm +// VMASKMOVPD xmm xmm m128 +// VMASKMOVPD ymm ymm m256 +func VMASKMOVPD(mxy, xy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMASKMOVPD: bad operands") +} + +// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMASKMOVPS m128 xmm xmm +// VMASKMOVPS m256 ymm ymm +// VMASKMOVPS xmm xmm m128 +// VMASKMOVPS ymm ymm m256 +func VMASKMOVPS(mxy, xy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPS", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPS", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPS", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMASKMOVPS", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMASKMOVPS: bad operands") +} + +// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VMAXPD xmm xmm xmm +// VMAXPD m128 xmm xmm +// VMAXPD ymm ymm ymm +// VMAXPD m256 ymm ymm +func VMAXPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMAXPD: bad operands") +} + +// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMAXPS xmm xmm xmm +// VMAXPS m128 xmm xmm +// VMAXPS ymm ymm ymm +// VMAXPS m256 ymm ymm +func VMAXPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMAXPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMAXPS: bad operands") +} + +// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMAXSD xmm xmm xmm +// VMAXSD m64 xmm xmm +func VMAXSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMAXSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMAXSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VMAXSD: bad operands") +} + +// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value. 
+// +// Forms: +// +// VMAXSS xmm xmm xmm +// VMAXSS m32 xmm xmm +func VMAXSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMAXSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMAXSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VMAXSS: bad operands") +} + +// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMINPD xmm xmm xmm +// VMINPD m128 xmm xmm +// VMINPD ymm ymm ymm +// VMINPD m256 ymm ymm +func VMINPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMINPD: bad operands") +} + +// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMINPS xmm xmm xmm +// VMINPS m128 xmm xmm +// VMINPS ymm ymm ymm +// VMINPS m256 ymm ymm +func VMINPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMINPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMINPS: bad operands") +} + +// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value. 
+// +// Forms: +// +// VMINSD xmm xmm xmm +// VMINSD m64 xmm xmm +func VMINSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMINSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMINSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VMINSD: bad operands") +} + +// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VMINSS xmm xmm xmm +// VMINSS m32 xmm xmm +func VMINSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMINSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMINSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VMINSS: bad operands") +} + +// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMOVAPD xmm xmm +// VMOVAPD m128 xmm +// VMOVAPD ymm ymm +// VMOVAPD m256 ymm +// VMOVAPD xmm m128 +// VMOVAPD ymm m256 +func VMOVAPD(mxy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMOVAPD: bad operands") +} + +// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VMOVAPS xmm xmm +// VMOVAPS m128 xmm +// VMOVAPS ymm ymm +// VMOVAPS m256 ymm +// VMOVAPS xmm m128 +// VMOVAPS ymm m256 +func VMOVAPS(mxy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVAPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMOVAPS: bad operands") +} + +// VMOVD: Move Doubleword. +// +// Forms: +// +// VMOVD xmm r32 +// VMOVD r32 xmm +// VMOVD m32 xmm +// VMOVD xmm m32 +func VMOVD(mrx, mrx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mrx) && operand.IsR32(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVD", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsR32(mrx) && operand.IsXMM(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVD", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsM32(mrx) && operand.IsXMM(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVD", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsXMM(mrx) && operand.IsM32(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVD", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + } + return nil, errors.New("VMOVD: bad operands") +} + +// VMOVDDUP: Move One Double-FP and Duplicate. 
+// +// Forms: +// +// VMOVDDUP xmm xmm +// VMOVDDUP m64 xmm +// VMOVDDUP ymm ymm +// VMOVDDUP m256 ymm +func VMOVDDUP(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVDDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVDDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVDDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVDDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VMOVDDUP: bad operands") +} + +// VMOVDQA: Move Aligned Double Quadword. +// +// Forms: +// +// VMOVDQA xmm xmm +// VMOVDQA m128 xmm +// VMOVDQA ymm ymm +// VMOVDQA m256 ymm +// VMOVDQA xmm m128 +// VMOVDQA ymm m256 +func VMOVDQA(mxy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQA", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQA", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQA", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQA", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQA", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQA", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMOVDQA: bad operands") +} + +// VMOVDQU: Move Unaligned Double Quadword. 
+// +// Forms: +// +// VMOVDQU xmm xmm +// VMOVDQU m128 xmm +// VMOVDQU ymm ymm +// VMOVDQU m256 ymm +// VMOVDQU xmm m128 +// VMOVDQU ymm m256 +func VMOVDQU(mxy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQU", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQU", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQU", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQU", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQU", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVDQU", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMOVDQU: bad operands") +} + +// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low. +// +// Forms: +// +// VMOVHLPS xmm xmm xmm +func VMOVHLPS(x, x1, x2 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1) && operand.IsXMM(x2): + return &intrep.Instruction{ + Opcode: "VMOVHLPS", + Operands: []operand.Op{x, x1, x2}, + Inputs: []operand.Op{x, x1}, + Outputs: []operand.Op{x2}, + }, nil + } + return nil, errors.New("VMOVHLPS: bad operands") +} + +// VMOVHPD: Move High Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVHPD xmm m64 +// VMOVHPD m64 xmm xmm +func VMOVHPD(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.IsXMM(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVHPD", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsM64(ops[0]) && operand.IsXMM(ops[1]) && operand.IsXMM(ops[2]): + return &intrep.Instruction{ + Opcode: "VMOVHPD", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("VMOVHPD: bad operands") +} + +// VMOVHPS: Move High Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVHPS xmm m64 +// VMOVHPS m64 xmm xmm +func VMOVHPS(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.IsXMM(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVHPS", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsM64(ops[0]) && operand.IsXMM(ops[1]) && operand.IsXMM(ops[2]): + return &intrep.Instruction{ + Opcode: "VMOVHPS", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("VMOVHPS: bad operands") +} + +// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High. 
+// +// Forms: +// +// VMOVLHPS xmm xmm xmm +func VMOVLHPS(x, x1, x2 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsXMM(x1) && operand.IsXMM(x2): + return &intrep.Instruction{ + Opcode: "VMOVLHPS", + Operands: []operand.Op{x, x1, x2}, + Inputs: []operand.Op{x, x1}, + Outputs: []operand.Op{x2}, + }, nil + } + return nil, errors.New("VMOVLHPS: bad operands") +} + +// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVLPD xmm m64 +// VMOVLPD m64 xmm xmm +func VMOVLPD(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.IsXMM(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVLPD", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsM64(ops[0]) && operand.IsXMM(ops[1]) && operand.IsXMM(ops[2]): + return &intrep.Instruction{ + Opcode: "VMOVLPD", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("VMOVLPD: bad operands") +} + +// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVLPS xmm m64 +// VMOVLPS m64 xmm xmm +func VMOVLPS(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.IsXMM(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVLPS", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsM64(ops[0]) && operand.IsXMM(ops[1]) && operand.IsXMM(ops[2]): + return &intrep.Instruction{ + Opcode: "VMOVLPS", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("VMOVLPS: bad operands") +} + +// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask. +// +// Forms: +// +// VMOVMSKPD xmm r32 +// VMOVMSKPD ymm r32 +func VMOVMSKPD(xy, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VMOVMSKPD", + Operands: []operand.Op{xy, r}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsYMM(xy) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VMOVMSKPD", + Operands: []operand.Op{xy, r}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VMOVMSKPD: bad operands") +} + +// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask. +// +// Forms: +// +// VMOVMSKPS xmm r32 +// VMOVMSKPS ymm r32 +func VMOVMSKPS(xy, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VMOVMSKPS", + Operands: []operand.Op{xy, r}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsYMM(xy) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VMOVMSKPS", + Operands: []operand.Op{xy, r}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VMOVMSKPS: bad operands") +} + +// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint. 
+// +// Forms: +// +// VMOVNTDQ xmm m128 +// VMOVNTDQ ymm m256 +func VMOVNTDQ(xy, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "VMOVNTDQ", + Operands: []operand.Op{xy, m}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{m}, + }, nil + case operand.IsYMM(xy) && operand.IsM256(m): + return &intrep.Instruction{ + Opcode: "VMOVNTDQ", + Operands: []operand.Op{xy, m}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("VMOVNTDQ: bad operands") +} + +// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint. +// +// Forms: +// +// VMOVNTDQA m128 xmm +// VMOVNTDQA m256 ymm +func VMOVNTDQA(m, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(m) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVNTDQA", + Operands: []operand.Op{m, xy}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(m) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVNTDQA", + Operands: []operand.Op{m, xy}, + Inputs: []operand.Op{m}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VMOVNTDQA: bad operands") +} + +// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTPD xmm m128 +// VMOVNTPD ymm m256 +func VMOVNTPD(xy, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "VMOVNTPD", + Operands: []operand.Op{xy, m}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{m}, + }, nil + case operand.IsYMM(xy) && operand.IsM256(m): + return &intrep.Instruction{ + Opcode: "VMOVNTPD", + Operands: []operand.Op{xy, m}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("VMOVNTPD: bad operands") +} + +// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint. +// +// Forms: +// +// VMOVNTPS xmm m128 +// VMOVNTPS ymm m256 +func VMOVNTPS(xy, m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsM128(m): + return &intrep.Instruction{ + Opcode: "VMOVNTPS", + Operands: []operand.Op{xy, m}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{m}, + }, nil + case operand.IsYMM(xy) && operand.IsM256(m): + return &intrep.Instruction{ + Opcode: "VMOVNTPS", + Operands: []operand.Op{xy, m}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("VMOVNTPS: bad operands") +} + +// VMOVQ: Move Quadword. 
+// +// Forms: +// +// VMOVQ xmm r64 +// VMOVQ r64 xmm +// VMOVQ xmm xmm +// VMOVQ m64 xmm +// VMOVQ xmm m64 +func VMOVQ(mrx, mrx1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mrx) && operand.IsR64(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVQ", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsR64(mrx) && operand.IsXMM(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVQ", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsXMM(mrx) && operand.IsXMM(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVQ", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsM64(mrx) && operand.IsXMM(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVQ", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + case operand.IsXMM(mrx) && operand.IsM64(mrx1): + return &intrep.Instruction{ + Opcode: "VMOVQ", + Operands: []operand.Op{mrx, mrx1}, + Inputs: []operand.Op{mrx}, + Outputs: []operand.Op{mrx1}, + }, nil + } + return nil, errors.New("VMOVQ: bad operands") +} + +// VMOVSD: Move Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VMOVSD m64 xmm +// VMOVSD xmm m64 +// VMOVSD xmm xmm xmm +func VMOVSD(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.IsM64(ops[0]) && operand.IsXMM(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVSD", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsXMM(ops[0]) && operand.IsM64(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVSD", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsXMM(ops[0]) && operand.IsXMM(ops[1]) && operand.IsXMM(ops[2]): + return &intrep.Instruction{ + Opcode: "VMOVSD", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("VMOVSD: bad operands") +} + +// VMOVSHDUP: Move Packed Single-FP High and Duplicate. +// +// Forms: +// +// VMOVSHDUP xmm xmm +// VMOVSHDUP m128 xmm +// VMOVSHDUP ymm ymm +// VMOVSHDUP m256 ymm +func VMOVSHDUP(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSHDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSHDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSHDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSHDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VMOVSHDUP: bad operands") +} + +// VMOVSLDUP: Move Packed Single-FP Low and Duplicate. 
+// +// Forms: +// +// VMOVSLDUP xmm xmm +// VMOVSLDUP m128 xmm +// VMOVSLDUP ymm ymm +// VMOVSLDUP m256 ymm +func VMOVSLDUP(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSLDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSLDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSLDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VMOVSLDUP", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VMOVSLDUP: bad operands") +} + +// VMOVSS: Move Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVSS m32 xmm +// VMOVSS xmm m32 +// VMOVSS xmm xmm xmm +func VMOVSS(ops ...operand.Op) (*intrep.Instruction, error) { + switch { + case len(ops) == 2 && operand.IsM32(ops[0]) && operand.IsXMM(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVSS", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 2 && operand.IsXMM(ops[0]) && operand.IsM32(ops[1]): + return &intrep.Instruction{ + Opcode: "VMOVSS", + Operands: ops, + Inputs: []operand.Op{ops[0]}, + Outputs: []operand.Op{ops[1]}, + }, nil + case len(ops) == 3 && operand.IsXMM(ops[0]) && operand.IsXMM(ops[1]) && operand.IsXMM(ops[2]): + return &intrep.Instruction{ + Opcode: "VMOVSS", + Operands: ops, + Inputs: []operand.Op{ops[0], ops[1]}, + Outputs: []operand.Op{ops[2]}, + }, nil + } + return nil, errors.New("VMOVSS: bad operands") +} + +// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VMOVUPD xmm xmm +// VMOVUPD m128 xmm +// VMOVUPD ymm ymm +// VMOVUPD m256 ymm +// VMOVUPD xmm m128 +// VMOVUPD ymm m256 +func VMOVUPD(mxy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPD", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMOVUPD: bad operands") +} + +// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VMOVUPS xmm xmm +// VMOVUPS m128 xmm +// VMOVUPS ymm ymm +// VMOVUPS m256 ymm +// VMOVUPS xmm m128 +// VMOVUPS ymm m256 +func VMOVUPS(mxy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VMOVUPS", + Operands: []operand.Op{mxy, mxy1}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VMOVUPS: bad operands") +} + +// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference. 
+// +// Forms: +// +// VMPSADBW imm8 xmm xmm xmm +// VMPSADBW imm8 m128 xmm xmm +// VMPSADBW imm8 ymm ymm ymm +// VMPSADBW imm8 m256 ymm ymm +func VMPSADBW(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMPSADBW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMPSADBW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMPSADBW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMPSADBW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMPSADBW: bad operands") +} + +// VMULPD: Multiply Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VMULPD xmm xmm xmm +// VMULPD m128 xmm xmm +// VMULPD ymm ymm ymm +// VMULPD m256 ymm ymm +func VMULPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMULPD: bad operands") +} + +// VMULPS: Multiply Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VMULPS xmm xmm xmm +// VMULPS m128 xmm xmm +// VMULPS ymm ymm ymm +// VMULPS m256 ymm ymm +func VMULPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VMULPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VMULPS: bad operands") +} + +// VMULSD: Multiply Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VMULSD xmm xmm xmm +// VMULSD m64 xmm xmm +func VMULSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMULSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMULSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VMULSD: bad operands") +} + +// VMULSS: Multiply Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VMULSS xmm xmm xmm +// VMULSS m32 xmm xmm +func VMULSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMULSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VMULSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VMULSS: bad operands") +} + +// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VORPD xmm xmm xmm +// VORPD m128 xmm xmm +// VORPD ymm ymm ymm +// VORPD m256 ymm ymm +func VORPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VORPD: bad operands") +} + +// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values. +// +// Forms: +// +// VORPS xmm xmm xmm +// VORPS m128 xmm xmm +// VORPS ymm ymm ymm +// VORPS m256 ymm ymm +func VORPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VORPS: bad operands") +} + +// VPABSB: Packed Absolute Value of Byte Integers. 
+// +// Forms: +// +// VPABSB xmm xmm +// VPABSB m128 xmm +// VPABSB ymm ymm +// VPABSB m256 ymm +func VPABSB(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSB", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSB", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSB", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSB", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPABSB: bad operands") +} + +// VPABSD: Packed Absolute Value of Doubleword Integers. +// +// Forms: +// +// VPABSD xmm xmm +// VPABSD m128 xmm +// VPABSD ymm ymm +// VPABSD m256 ymm +func VPABSD(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPABSD: bad operands") +} + +// VPABSW: Packed Absolute Value of Word Integers. +// +// Forms: +// +// VPABSW xmm xmm +// VPABSW m128 xmm +// VPABSW ymm ymm +// VPABSW m256 ymm +func VPABSW(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSW", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSW", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSW", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPABSW", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPABSW: bad operands") +} + +// VPACKSSDW: Pack Doublewords into Words with Signed Saturation. 
+// +// Forms: +// +// VPACKSSDW xmm xmm xmm +// VPACKSSDW m128 xmm xmm +// VPACKSSDW ymm ymm ymm +// VPACKSSDW m256 ymm ymm +func VPACKSSDW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPACKSSDW: bad operands") +} + +// VPACKSSWB: Pack Words into Bytes with Signed Saturation. +// +// Forms: +// +// VPACKSSWB xmm xmm xmm +// VPACKSSWB m128 xmm xmm +// VPACKSSWB ymm ymm ymm +// VPACKSSWB m256 ymm ymm +func VPACKSSWB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKSSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPACKSSWB: bad operands") +} + +// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation. 
+// +// Forms: +// +// VPACKUSDW xmm xmm xmm +// VPACKUSDW m128 xmm xmm +// VPACKUSDW ymm ymm ymm +// VPACKUSDW m256 ymm ymm +func VPACKUSDW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPACKUSDW: bad operands") +} + +// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation. +// +// Forms: +// +// VPACKUSWB xmm xmm xmm +// VPACKUSWB m128 xmm xmm +// VPACKUSWB ymm ymm ymm +// VPACKUSWB m256 ymm ymm +func VPACKUSWB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPACKUSWB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPACKUSWB: bad operands") +} + +// VPADDB: Add Packed Byte Integers. 
+// +// Forms: +// +// VPADDB xmm xmm xmm +// VPADDB m128 xmm xmm +// VPADDB ymm ymm ymm +// VPADDB m256 ymm ymm +func VPADDB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDB: bad operands") +} + +// VPADDD: Add Packed Doubleword Integers. +// +// Forms: +// +// VPADDD xmm xmm xmm +// VPADDD m128 xmm xmm +// VPADDD ymm ymm ymm +// VPADDD m256 ymm ymm +func VPADDD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDD: bad operands") +} + +// VPADDQ: Add Packed Quadword Integers. 
+// +// Forms: +// +// VPADDQ xmm xmm xmm +// VPADDQ m128 xmm xmm +// VPADDQ ymm ymm ymm +// VPADDQ m256 ymm ymm +func VPADDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDQ: bad operands") +} + +// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// VPADDSB xmm xmm xmm +// VPADDSB m128 xmm xmm +// VPADDSB ymm ymm ymm +// VPADDSB m256 ymm ymm +func VPADDSB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDSB: bad operands") +} + +// VPADDSW: Add Packed Signed Word Integers with Signed Saturation. 
+// +// Forms: +// +// VPADDSW xmm xmm xmm +// VPADDSW m128 xmm xmm +// VPADDSW ymm ymm ymm +// VPADDSW m256 ymm ymm +func VPADDSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDSW: bad operands") +} + +// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// VPADDUSB xmm xmm xmm +// VPADDUSB m128 xmm xmm +// VPADDUSB ymm ymm ymm +// VPADDUSB m256 ymm ymm +func VPADDUSB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDUSB: bad operands") +} + +// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation. 
+// +// Forms: +// +// VPADDUSW xmm xmm xmm +// VPADDUSW m128 xmm xmm +// VPADDUSW ymm ymm ymm +// VPADDUSW m256 ymm ymm +func VPADDUSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDUSW: bad operands") +} + +// VPADDW: Add Packed Word Integers. +// +// Forms: +// +// VPADDW xmm xmm xmm +// VPADDW m128 xmm xmm +// VPADDW ymm ymm ymm +// VPADDW m256 ymm ymm +func VPADDW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPADDW: bad operands") +} + +// VPALIGNR: Packed Align Right. 
+// +// Forms: +// +// VPALIGNR imm8 xmm xmm xmm +// VPALIGNR imm8 m128 xmm xmm +// VPALIGNR imm8 ymm ymm ymm +// VPALIGNR imm8 m256 ymm ymm +func VPALIGNR(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPALIGNR", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPALIGNR", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPALIGNR", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPALIGNR", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPALIGNR: bad operands") +} + +// VPAND: Packed Bitwise Logical AND. +// +// Forms: +// +// VPAND xmm xmm xmm +// VPAND m128 xmm xmm +// VPAND ymm ymm ymm +// VPAND m256 ymm ymm +func VPAND(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPAND: bad operands") +} + +// VPANDN: Packed Bitwise Logical AND NOT. 
+// +// Forms: +// +// VPANDN xmm xmm xmm +// VPANDN m128 xmm xmm +// VPANDN ymm ymm ymm +// VPANDN m256 ymm ymm +func VPANDN(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPANDN", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPANDN", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPANDN", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPANDN", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPANDN: bad operands") +} + +// VPAVGB: Average Packed Byte Integers. +// +// Forms: +// +// VPAVGB xmm xmm xmm +// VPAVGB m128 xmm xmm +// VPAVGB ymm ymm ymm +// VPAVGB m256 ymm ymm +func VPAVGB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPAVGB: bad operands") +} + +// VPAVGW: Average Packed Word Integers. 
+// +// Forms: +// +// VPAVGW xmm xmm xmm +// VPAVGW m128 xmm xmm +// VPAVGW ymm ymm ymm +// VPAVGW m256 ymm ymm +func VPAVGW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPAVGW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPAVGW: bad operands") +} + +// VPBLENDD: Blend Packed Doublewords. +// +// Forms: +// +// VPBLENDD imm8 xmm xmm xmm +// VPBLENDD imm8 m128 xmm xmm +// VPBLENDD imm8 ymm ymm ymm +// VPBLENDD imm8 m256 ymm ymm +func VPBLENDD(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPBLENDD: bad operands") +} + +// VPBLENDVB: Variable Blend Packed Bytes. 
+// +// Forms: +// +// VPBLENDVB xmm xmm xmm xmm +// VPBLENDVB xmm m128 xmm xmm +// VPBLENDVB ymm ymm ymm ymm +// VPBLENDVB ymm m256 ymm ymm +func VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsXMM(mxy) && operand.IsXMM(xy1) && operand.IsXMM(xy2): + return &intrep.Instruction{ + Opcode: "VPBLENDVB", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsXMM(xy) && operand.IsM128(mxy) && operand.IsXMM(xy1) && operand.IsXMM(xy2): + return &intrep.Instruction{ + Opcode: "VPBLENDVB", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsYMM(xy) && operand.IsYMM(mxy) && operand.IsYMM(xy1) && operand.IsYMM(xy2): + return &intrep.Instruction{ + Opcode: "VPBLENDVB", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + case operand.IsYMM(xy) && operand.IsM256(mxy) && operand.IsYMM(xy1) && operand.IsYMM(xy2): + return &intrep.Instruction{ + Opcode: "VPBLENDVB", + Operands: []operand.Op{xy, mxy, xy1, xy2}, + Inputs: []operand.Op{xy, mxy, xy1}, + Outputs: []operand.Op{xy2}, + }, nil + } + return nil, errors.New("VPBLENDVB: bad operands") +} + +// VPBLENDW: Blend Packed Words. +// +// Forms: +// +// VPBLENDW imm8 xmm xmm xmm +// VPBLENDW imm8 m128 xmm xmm +// VPBLENDW imm8 ymm ymm ymm +// VPBLENDW imm8 m256 ymm ymm +func VPBLENDW(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPBLENDW", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPBLENDW: bad operands") +} + +// VPBROADCASTB: Broadcast Byte Integer. 
+// +// Forms: +// +// VPBROADCASTB xmm xmm +// VPBROADCASTB m8 xmm +// VPBROADCASTB xmm ymm +// VPBROADCASTB m8 ymm +func VPBROADCASTB(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTB", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM8(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTB", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTB", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM8(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTB", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPBROADCASTB: bad operands") +} + +// VPBROADCASTD: Broadcast Doubleword Integer. +// +// Forms: +// +// VPBROADCASTD xmm xmm +// VPBROADCASTD m32 xmm +// VPBROADCASTD xmm ymm +// VPBROADCASTD m32 ymm +func VPBROADCASTD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPBROADCASTD: bad operands") +} + +// VPBROADCASTQ: Broadcast Quadword Integer. +// +// Forms: +// +// VPBROADCASTQ xmm xmm +// VPBROADCASTQ m64 xmm +// VPBROADCASTQ xmm ymm +// VPBROADCASTQ m64 ymm +func VPBROADCASTQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPBROADCASTQ: bad operands") +} + +// VPBROADCASTW: Broadcast Word Integer. 
+// +// Forms: +// +// VPBROADCASTW xmm xmm +// VPBROADCASTW m16 xmm +// VPBROADCASTW xmm ymm +// VPBROADCASTW m16 ymm +func VPBROADCASTW(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM16(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM16(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPBROADCASTW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPBROADCASTW: bad operands") +} + +// VPCLMULQDQ: Carry-Less Quadword Multiplication. +// +// Forms: +// +// VPCLMULQDQ imm8 xmm xmm xmm +// VPCLMULQDQ imm8 m128 xmm xmm +func VPCLMULQDQ(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPCLMULQDQ", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPCLMULQDQ", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VPCLMULQDQ: bad operands") +} + +// VPCMPEQB: Compare Packed Byte Data for Equality. +// +// Forms: +// +// VPCMPEQB xmm xmm xmm +// VPCMPEQB m128 xmm xmm +// VPCMPEQB ymm ymm ymm +// VPCMPEQB m256 ymm ymm +func VPCMPEQB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPEQB: bad operands") +} + +// VPCMPEQD: Compare Packed Doubleword Data for Equality. 
+// +// Forms: +// +// VPCMPEQD xmm xmm xmm +// VPCMPEQD m128 xmm xmm +// VPCMPEQD ymm ymm ymm +// VPCMPEQD m256 ymm ymm +func VPCMPEQD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPEQD: bad operands") +} + +// VPCMPEQQ: Compare Packed Quadword Data for Equality. +// +// Forms: +// +// VPCMPEQQ xmm xmm xmm +// VPCMPEQQ m128 xmm xmm +// VPCMPEQQ ymm ymm ymm +// VPCMPEQQ m256 ymm ymm +func VPCMPEQQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPEQQ: bad operands") +} + +// VPCMPEQW: Compare Packed Word Data for Equality. 
+// +// Forms: +// +// VPCMPEQW xmm xmm xmm +// VPCMPEQW m128 xmm xmm +// VPCMPEQW ymm ymm ymm +// VPCMPEQW m256 ymm ymm +func VPCMPEQW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPEQW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPEQW: bad operands") +} + +// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index. +// +// Forms: +// +// VPCMPESTRI imm8 xmm xmm +// VPCMPESTRI imm8 m128 xmm +func VPCMPESTRI(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPESTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.ECX}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPESTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.ECX}, + }, nil + } + return nil, errors.New("VPCMPESTRI: bad operands") +} + +// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask. +// +// Forms: +// +// VPCMPESTRM imm8 xmm xmm +// VPCMPESTRM imm8 m128 xmm +func VPCMPESTRM(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPESTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.X0}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPESTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x, reg.EAX, reg.EDX}, + Outputs: []operand.Op{reg.X0}, + }, nil + } + return nil, errors.New("VPCMPESTRM: bad operands") +} + +// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than. 
+// +// Forms: +// +// VPCMPGTB xmm xmm xmm +// VPCMPGTB m128 xmm xmm +// VPCMPGTB ymm ymm ymm +// VPCMPGTB m256 ymm ymm +func VPCMPGTB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPGTB: bad operands") +} + +// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than. +// +// Forms: +// +// VPCMPGTD xmm xmm xmm +// VPCMPGTD m128 xmm xmm +// VPCMPGTD ymm ymm ymm +// VPCMPGTD m256 ymm ymm +func VPCMPGTD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPGTD: bad operands") +} + +// VPCMPGTQ: Compare Packed Data for Greater Than. 
+// +// Forms: +// +// VPCMPGTQ xmm xmm xmm +// VPCMPGTQ m128 xmm xmm +// VPCMPGTQ ymm ymm ymm +// VPCMPGTQ m256 ymm ymm +func VPCMPGTQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPGTQ: bad operands") +} + +// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than. +// +// Forms: +// +// VPCMPGTW xmm xmm xmm +// VPCMPGTW m128 xmm xmm +// VPCMPGTW ymm ymm ymm +// VPCMPGTW m256 ymm ymm +func VPCMPGTW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPCMPGTW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPCMPGTW: bad operands") +} + +// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index. +// +// Forms: +// +// VPCMPISTRI imm8 xmm xmm +// VPCMPISTRI imm8 m128 xmm +func VPCMPISTRI(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPISTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.ECX}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPISTRI", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.ECX}, + }, nil + } + return nil, errors.New("VPCMPISTRI: bad operands") +} + +// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask. 
+// +// Forms: +// +// VPCMPISTRM imm8 xmm xmm +// VPCMPISTRM imm8 m128 xmm +func VPCMPISTRM(i, mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPISTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.X0}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPCMPISTRM", + Operands: []operand.Op{i, mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{reg.X0}, + }, nil + } + return nil, errors.New("VPCMPISTRM: bad operands") +} + +// VPERM2F128: Permute Floating-Point Values. +// +// Forms: +// +// VPERM2F128 imm8 ymm ymm ymm +// VPERM2F128 imm8 m256 ymm ymm +func VPERM2F128(i, my, y, y1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsYMM(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERM2F128", + Operands: []operand.Op{i, my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERM2F128", + Operands: []operand.Op{i, my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + } + return nil, errors.New("VPERM2F128: bad operands") +} + +// VPERM2I128: Permute 128-Bit Integer Values. +// +// Forms: +// +// VPERM2I128 imm8 ymm ymm ymm +// VPERM2I128 imm8 m256 ymm ymm +func VPERM2I128(i, my, y, y1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsYMM(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERM2I128", + Operands: []operand.Op{i, my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERM2I128", + Operands: []operand.Op{i, my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + } + return nil, errors.New("VPERM2I128: bad operands") +} + +// VPERMD: Permute Doubleword Integers. +// +// Forms: +// +// VPERMD ymm ymm ymm +// VPERMD m256 ymm ymm +func VPERMD(my, y, y1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsYMM(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERMD", + Operands: []operand.Op{my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + case operand.IsM256(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERMD", + Operands: []operand.Op{my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + } + return nil, errors.New("VPERMD: bad operands") +} + +// VPERMILPD: Permute Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VPERMILPD imm8 xmm xmm +// VPERMILPD xmm xmm xmm +// VPERMILPD m128 xmm xmm +// VPERMILPD imm8 m128 xmm +// VPERMILPD imm8 ymm ymm +// VPERMILPD ymm ymm ymm +// VPERMILPD m256 ymm ymm +// VPERMILPD imm8 m256 ymm +func VPERMILPD(imxy, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imxy) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(imxy) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(imxy) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(imxy) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(imxy) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(imxy) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(imxy) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(imxy) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPD", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPERMILPD: bad operands") +} + +// VPERMILPS: Permute Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VPERMILPS imm8 xmm xmm +// VPERMILPS xmm xmm xmm +// VPERMILPS m128 xmm xmm +// VPERMILPS imm8 m128 xmm +// VPERMILPS imm8 ymm ymm +// VPERMILPS ymm ymm ymm +// VPERMILPS m256 ymm ymm +// VPERMILPS imm8 m256 ymm +func VPERMILPS(imxy, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imxy) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(imxy) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(imxy) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(imxy) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(imxy) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(imxy) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(imxy) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{imxy, mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(imxy) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPERMILPS", + Operands: []operand.Op{imxy, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPERMILPS: bad operands") +} + +// VPERMPD: Permute Double-Precision Floating-Point Elements. +// +// Forms: +// +// VPERMPD imm8 ymm ymm +// VPERMPD imm8 m256 ymm +func VPERMPD(i, my, y operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsYMM(my) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VPERMPD", + Operands: []operand.Op{i, my, y}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{y}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(my) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VPERMPD", + Operands: []operand.Op{i, my, y}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{y}, + }, nil + } + return nil, errors.New("VPERMPD: bad operands") +} + +// VPERMPS: Permute Single-Precision Floating-Point Elements. 
+// +// Forms: +// +// VPERMPS ymm ymm ymm +// VPERMPS m256 ymm ymm +func VPERMPS(my, y, y1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsYMM(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERMPS", + Operands: []operand.Op{my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + case operand.IsM256(my) && operand.IsYMM(y) && operand.IsYMM(y1): + return &intrep.Instruction{ + Opcode: "VPERMPS", + Operands: []operand.Op{my, y, y1}, + Inputs: []operand.Op{my, y}, + Outputs: []operand.Op{y1}, + }, nil + } + return nil, errors.New("VPERMPS: bad operands") +} + +// VPERMQ: Permute Quadword Integers. +// +// Forms: +// +// VPERMQ imm8 ymm ymm +// VPERMQ imm8 m256 ymm +func VPERMQ(i, my, y operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsYMM(my) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VPERMQ", + Operands: []operand.Op{i, my, y}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{y}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(my) && operand.IsYMM(y): + return &intrep.Instruction{ + Opcode: "VPERMQ", + Operands: []operand.Op{i, my, y}, + Inputs: []operand.Op{my}, + Outputs: []operand.Op{y}, + }, nil + } + return nil, errors.New("VPERMQ: bad operands") +} + +// VPEXTRB: Extract Byte. +// +// Forms: +// +// VPEXTRB imm8 xmm r32 +// VPEXTRB imm8 xmm m8 +func VPEXTRB(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRB", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRB", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("VPEXTRB: bad operands") +} + +// VPEXTRD: Extract Doubleword. +// +// Forms: +// +// VPEXTRD imm8 xmm r32 +// VPEXTRD imm8 xmm m32 +func VPEXTRD(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRD", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRD", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("VPEXTRD: bad operands") +} + +// VPEXTRQ: Extract Quadword. +// +// Forms: +// +// VPEXTRQ imm8 xmm r64 +// VPEXTRQ imm8 xmm m64 +func VPEXTRQ(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRQ", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRQ", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("VPEXTRQ: bad operands") +} + +// VPEXTRW: Extract Word. 
+// +// Forms: +// +// VPEXTRW imm8 xmm r32 +// VPEXTRW imm8 xmm m16 +func VPEXTRW(i, x, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRW", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(i) && operand.IsXMM(x) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "VPEXTRW", + Operands: []operand.Op{i, x, mr}, + Inputs: []operand.Op{x}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("VPEXTRW: bad operands") +} + +// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices. +// +// Forms: +// +// VPGATHERDD xmm vm32x xmm +// VPGATHERDD ymm vm32y ymm +func VPGATHERDD(xy, v, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsVM32X(v) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPGATHERDD", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + case operand.IsYMM(xy) && operand.IsVM32Y(v) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPGATHERDD", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + } + return nil, errors.New("VPGATHERDD: bad operands") +} + +// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices. +// +// Forms: +// +// VPGATHERDQ xmm vm32x xmm +// VPGATHERDQ ymm vm32x ymm +func VPGATHERDQ(xy, v, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsVM32X(v) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPGATHERDQ", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + case operand.IsYMM(xy) && operand.IsVM32X(v) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPGATHERDQ", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + } + return nil, errors.New("VPGATHERDQ: bad operands") +} + +// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices. +// +// Forms: +// +// VPGATHERQD xmm vm64x xmm +// VPGATHERQD xmm vm64y xmm +func VPGATHERQD(x, v, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(x) && operand.IsVM64X(v) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPGATHERQD", + Operands: []operand.Op{x, v, x1}, + Inputs: []operand.Op{x, v, x1}, + Outputs: []operand.Op{x, x1}, + }, nil + case operand.IsXMM(x) && operand.IsVM64Y(v) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPGATHERQD", + Operands: []operand.Op{x, v, x1}, + Inputs: []operand.Op{x, v, x1}, + Outputs: []operand.Op{x, x1}, + }, nil + } + return nil, errors.New("VPGATHERQD: bad operands") +} + +// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices. 
+// +// Forms: +// +// VPGATHERQQ xmm vm64x xmm +// VPGATHERQQ ymm vm64y ymm +func VPGATHERQQ(xy, v, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsVM64X(v) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPGATHERQQ", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + case operand.IsYMM(xy) && operand.IsVM64Y(v) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPGATHERQQ", + Operands: []operand.Op{xy, v, xy1}, + Inputs: []operand.Op{xy, v, xy1}, + Outputs: []operand.Op{xy, xy1}, + }, nil + } + return nil, errors.New("VPGATHERQQ: bad operands") +} + +// VPHADDD: Packed Horizontal Add Doubleword Integer. +// +// Forms: +// +// VPHADDD xmm xmm xmm +// VPHADDD m128 xmm xmm +// VPHADDD ymm ymm ymm +// VPHADDD m256 ymm ymm +func VPHADDD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPHADDD: bad operands") +} + +// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation. +// +// Forms: +// +// VPHADDSW xmm xmm xmm +// VPHADDSW m128 xmm xmm +// VPHADDSW ymm ymm ymm +// VPHADDSW m256 ymm ymm +func VPHADDSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPHADDSW: bad operands") +} + +// VPHADDW: Packed Horizontal Add Word Integers. 
+// +// Forms: +// +// VPHADDW xmm xmm xmm +// VPHADDW m128 xmm xmm +// VPHADDW ymm ymm ymm +// VPHADDW m256 ymm ymm +func VPHADDW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHADDW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPHADDW: bad operands") +} + +// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers. +// +// Forms: +// +// VPHMINPOSUW xmm xmm +// VPHMINPOSUW m128 xmm +func VPHMINPOSUW(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPHMINPOSUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VPHMINPOSUW", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("VPHMINPOSUW: bad operands") +} + +// VPHSUBD: Packed Horizontal Subtract Doubleword Integers. +// +// Forms: +// +// VPHSUBD xmm xmm xmm +// VPHSUBD m128 xmm xmm +// VPHSUBD ymm ymm ymm +// VPHSUBD m256 ymm ymm +func VPHSUBD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPHSUBD: bad operands") +} + +// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation. 
+// +// Forms: +// +// VPHSUBSW xmm xmm xmm +// VPHSUBSW m128 xmm xmm +// VPHSUBSW ymm ymm ymm +// VPHSUBSW m256 ymm ymm +func VPHSUBSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPHSUBSW: bad operands") +} + +// VPHSUBW: Packed Horizontal Subtract Word Integers. +// +// Forms: +// +// VPHSUBW xmm xmm xmm +// VPHSUBW m128 xmm xmm +// VPHSUBW ymm ymm ymm +// VPHSUBW m256 ymm ymm +func VPHSUBW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPHSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPHSUBW: bad operands") +} + +// VPINSRB: Insert Byte. +// +// Forms: +// +// VPINSRB imm8 r32 xmm xmm +// VPINSRB imm8 m8 xmm xmm +func VPINSRB(i, mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRB", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM8(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRB", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VPINSRB: bad operands") +} + +// VPINSRD: Insert Doubleword. 
+// +// Forms: +// +// VPINSRD imm8 r32 xmm xmm +// VPINSRD imm8 m32 xmm xmm +func VPINSRD(i, mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRD", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRD", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VPINSRD: bad operands") +} + +// VPINSRQ: Insert Quadword. +// +// Forms: +// +// VPINSRQ imm8 r64 xmm xmm +// VPINSRQ imm8 m64 xmm xmm +func VPINSRQ(i, mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR64(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRQ", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRQ", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VPINSRQ: bad operands") +} + +// VPINSRW: Insert Word. +// +// Forms: +// +// VPINSRW imm8 r32 xmm xmm +// VPINSRW imm8 m16 xmm xmm +func VPINSRW(i, mr, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsR32(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRW", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM16(mr) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VPINSRW", + Operands: []operand.Op{i, mr, x, x1}, + Inputs: []operand.Op{mr, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VPINSRW: bad operands") +} + +// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers. +// +// Forms: +// +// VPMADDUBSW xmm xmm xmm +// VPMADDUBSW m128 xmm xmm +// VPMADDUBSW ymm ymm ymm +// VPMADDUBSW m256 ymm ymm +func VPMADDUBSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMADDUBSW: bad operands") +} + +// VPMADDWD: Multiply and Add Packed Signed Word Integers. 
+// +// Forms: +// +// VPMADDWD xmm xmm xmm +// VPMADDWD m128 xmm xmm +// VPMADDWD ymm ymm ymm +// VPMADDWD m256 ymm ymm +func VPMADDWD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMADDWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMADDWD: bad operands") +} + +// VPMASKMOVD: Conditional Move Packed Doubleword Integers. +// +// Forms: +// +// VPMASKMOVD m128 xmm xmm +// VPMASKMOVD m256 ymm ymm +// VPMASKMOVD xmm xmm m128 +// VPMASKMOVD ymm ymm m256 +func VPMASKMOVD(mxy, xy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVD", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VPMASKMOVD: bad operands") +} + +// VPMASKMOVQ: Conditional Move Packed Quadword Integers. 
+// +// Forms: +// +// VPMASKMOVQ m128 xmm xmm +// VPMASKMOVQ m256 ymm ymm +// VPMASKMOVQ xmm xmm m128 +// VPMASKMOVQ ymm ymm m256 +func VPMASKMOVQ(mxy, xy, mxy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVQ", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVQ", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsM128(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVQ", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsM256(mxy1): + return &intrep.Instruction{ + Opcode: "VPMASKMOVQ", + Operands: []operand.Op{mxy, xy, mxy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{mxy1}, + }, nil + } + return nil, errors.New("VPMASKMOVQ: bad operands") +} + +// VPMAXSB: Maximum of Packed Signed Byte Integers. +// +// Forms: +// +// VPMAXSB xmm xmm xmm +// VPMAXSB m128 xmm xmm +// VPMAXSB ymm ymm ymm +// VPMAXSB m256 ymm ymm +func VPMAXSB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMAXSB: bad operands") +} + +// VPMAXSD: Maximum of Packed Signed Doubleword Integers. 
+// +// Forms: +// +// VPMAXSD xmm xmm xmm +// VPMAXSD m128 xmm xmm +// VPMAXSD ymm ymm ymm +// VPMAXSD m256 ymm ymm +func VPMAXSD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMAXSD: bad operands") +} + +// VPMAXSW: Maximum of Packed Signed Word Integers. +// +// Forms: +// +// VPMAXSW xmm xmm xmm +// VPMAXSW m128 xmm xmm +// VPMAXSW ymm ymm ymm +// VPMAXSW m256 ymm ymm +func VPMAXSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMAXSW: bad operands") +} + +// VPMAXUB: Maximum of Packed Unsigned Byte Integers. 
+// +// Forms: +// +// VPMAXUB xmm xmm xmm +// VPMAXUB m128 xmm xmm +// VPMAXUB ymm ymm ymm +// VPMAXUB m256 ymm ymm +func VPMAXUB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMAXUB: bad operands") +} + +// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMAXUD xmm xmm xmm +// VPMAXUD m128 xmm xmm +// VPMAXUD ymm ymm ymm +// VPMAXUD m256 ymm ymm +func VPMAXUD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMAXUD: bad operands") +} + +// VPMAXUW: Maximum of Packed Unsigned Word Integers. 
+// +// Forms: +// +// VPMAXUW xmm xmm xmm +// VPMAXUW m128 xmm xmm +// VPMAXUW ymm ymm ymm +// VPMAXUW m256 ymm ymm +func VPMAXUW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMAXUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMAXUW: bad operands") +} + +// VPMINSB: Minimum of Packed Signed Byte Integers. +// +// Forms: +// +// VPMINSB xmm xmm xmm +// VPMINSB m128 xmm xmm +// VPMINSB ymm ymm ymm +// VPMINSB m256 ymm ymm +func VPMINSB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMINSB: bad operands") +} + +// VPMINSD: Minimum of Packed Signed Doubleword Integers. 
+// +// Forms: +// +// VPMINSD xmm xmm xmm +// VPMINSD m128 xmm xmm +// VPMINSD ymm ymm ymm +// VPMINSD m256 ymm ymm +func VPMINSD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMINSD: bad operands") +} + +// VPMINSW: Minimum of Packed Signed Word Integers. +// +// Forms: +// +// VPMINSW xmm xmm xmm +// VPMINSW m128 xmm xmm +// VPMINSW ymm ymm ymm +// VPMINSW m256 ymm ymm +func VPMINSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMINSW: bad operands") +} + +// VPMINUB: Minimum of Packed Unsigned Byte Integers. 
+// +// Forms: +// +// VPMINUB xmm xmm xmm +// VPMINUB m128 xmm xmm +// VPMINUB ymm ymm ymm +// VPMINUB m256 ymm ymm +func VPMINUB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMINUB: bad operands") +} + +// VPMINUD: Minimum of Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMINUD xmm xmm xmm +// VPMINUD m128 xmm xmm +// VPMINUD ymm ymm ymm +// VPMINUD m256 ymm ymm +func VPMINUD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMINUD: bad operands") +} + +// VPMINUW: Minimum of Packed Unsigned Word Integers. 
+// +// Forms: +// +// VPMINUW xmm xmm xmm +// VPMINUW m128 xmm xmm +// VPMINUW ymm ymm ymm +// VPMINUW m256 ymm ymm +func VPMINUW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMINUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMINUW: bad operands") +} + +// VPMOVMSKB: Move Byte Mask. +// +// Forms: +// +// VPMOVMSKB xmm r32 +// VPMOVMSKB ymm r32 +func VPMOVMSKB(xy, r operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(xy) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VPMOVMSKB", + Operands: []operand.Op{xy, r}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{r}, + }, nil + case operand.IsYMM(xy) && operand.IsR32(r): + return &intrep.Instruction{ + Opcode: "VPMOVMSKB", + Operands: []operand.Op{xy, r}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{r}, + }, nil + } + return nil, errors.New("VPMOVMSKB: bad operands") +} + +// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBD xmm xmm +// VPMOVSXBD m32 xmm +// VPMOVSXBD xmm ymm +// VPMOVSXBD m64 ymm +func VPMOVSXBD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVSXBD: bad operands") +} + +// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension. 
+// +// Forms: +// +// VPMOVSXBQ xmm xmm +// VPMOVSXBQ m16 xmm +// VPMOVSXBQ xmm ymm +// VPMOVSXBQ m32 ymm +func VPMOVSXBQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM16(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVSXBQ: bad operands") +} + +// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXBW xmm xmm +// VPMOVSXBW m64 xmm +// VPMOVSXBW xmm ymm +// VPMOVSXBW m128 ymm +func VPMOVSXBW(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVSXBW: bad operands") +} + +// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXDQ xmm xmm +// VPMOVSXDQ m64 xmm +// VPMOVSXDQ xmm ymm +// VPMOVSXDQ m128 ymm +func VPMOVSXDQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVSXDQ: bad operands") +} + +// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension. 
+// +// Forms: +// +// VPMOVSXWD xmm xmm +// VPMOVSXWD m64 xmm +// VPMOVSXWD xmm ymm +// VPMOVSXWD m128 ymm +func VPMOVSXWD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVSXWD: bad operands") +} + +// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension. +// +// Forms: +// +// VPMOVSXWQ xmm xmm +// VPMOVSXWQ m32 xmm +// VPMOVSXWQ xmm ymm +// VPMOVSXWQ m64 ymm +func VPMOVSXWQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVSXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVSXWQ: bad operands") +} + +// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBD xmm xmm +// VPMOVZXBD m32 xmm +// VPMOVZXBD xmm ymm +// VPMOVZXBD m64 ymm +func VPMOVZXBD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVZXBD: bad operands") +} + +// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension. 
+// +// Forms: +// +// VPMOVZXBQ xmm xmm +// VPMOVZXBQ m16 xmm +// VPMOVZXBQ xmm ymm +// VPMOVZXBQ m32 ymm +func VPMOVZXBQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM16(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVZXBQ: bad operands") +} + +// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXBW xmm xmm +// VPMOVZXBW m64 xmm +// VPMOVZXBW xmm ymm +// VPMOVZXBW m128 ymm +func VPMOVZXBW(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXBW", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVZXBW: bad operands") +} + +// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXDQ xmm xmm +// VPMOVZXDQ m64 xmm +// VPMOVZXDQ xmm ymm +// VPMOVZXDQ m128 ymm +func VPMOVZXDQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXDQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVZXDQ: bad operands") +} + +// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension. 
+// +// Forms: +// +// VPMOVZXWD xmm xmm +// VPMOVZXWD m64 xmm +// VPMOVZXWD xmm ymm +// VPMOVZXWD m128 ymm +func VPMOVZXWD(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWD", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVZXWD: bad operands") +} + +// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension. +// +// Forms: +// +// VPMOVZXWQ xmm xmm +// VPMOVZXWQ m32 xmm +// VPMOVZXWQ xmm ymm +// VPMOVZXWQ m64 ymm +func VPMOVZXWQ(mx, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsXMM(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM64(mx) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPMOVZXWQ", + Operands: []operand.Op{mx, xy}, + Inputs: []operand.Op{mx}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPMOVZXWQ: bad operands") +} + +// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result. +// +// Forms: +// +// VPMULDQ xmm xmm xmm +// VPMULDQ m128 xmm xmm +// VPMULDQ ymm ymm ymm +// VPMULDQ m256 ymm ymm +func VPMULDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULDQ: bad operands") +} + +// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale. 
+// +// Forms: +// +// VPMULHRSW xmm xmm xmm +// VPMULHRSW m128 xmm xmm +// VPMULHRSW ymm ymm ymm +// VPMULHRSW m256 ymm ymm +func VPMULHRSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHRSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHRSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHRSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHRSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULHRSW: bad operands") +} + +// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result. +// +// Forms: +// +// VPMULHUW xmm xmm xmm +// VPMULHUW m128 xmm xmm +// VPMULHUW ymm ymm ymm +// VPMULHUW m256 ymm ymm +func VPMULHUW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHUW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULHUW: bad operands") +} + +// VPMULHW: Multiply Packed Signed Word Integers and Store High Result. 
+// +// Forms: +// +// VPMULHW xmm xmm xmm +// VPMULHW m128 xmm xmm +// VPMULHW ymm ymm ymm +// VPMULHW m256 ymm ymm +func VPMULHW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULHW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULHW: bad operands") +} + +// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result. +// +// Forms: +// +// VPMULLD xmm xmm xmm +// VPMULLD m128 xmm xmm +// VPMULLD ymm ymm ymm +// VPMULLD m256 ymm ymm +func VPMULLD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULLD: bad operands") +} + +// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result. 
+// +// Forms: +// +// VPMULLW xmm xmm xmm +// VPMULLW m128 xmm xmm +// VPMULLW ymm ymm ymm +// VPMULLW m256 ymm ymm +func VPMULLW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULLW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULLW: bad operands") +} + +// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers. +// +// Forms: +// +// VPMULUDQ xmm xmm xmm +// VPMULUDQ m128 xmm xmm +// VPMULUDQ ymm ymm ymm +// VPMULUDQ m256 ymm ymm +func VPMULUDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULUDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULUDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULUDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPMULUDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPMULUDQ: bad operands") +} + +// VPOR: Packed Bitwise Logical OR. 
+// +// Forms: +// +// VPOR xmm xmm xmm +// VPOR m128 xmm xmm +// VPOR ymm ymm ymm +// VPOR m256 ymm ymm +func VPOR(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPOR: bad operands") +} + +// VPSADBW: Compute Sum of Absolute Differences. +// +// Forms: +// +// VPSADBW xmm xmm xmm +// VPSADBW m128 xmm xmm +// VPSADBW ymm ymm ymm +// VPSADBW m256 ymm ymm +func VPSADBW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSADBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSADBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSADBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSADBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSADBW: bad operands") +} + +// VPSHUFB: Packed Shuffle Bytes. 
+// +// Forms: +// +// VPSHUFB xmm xmm xmm +// VPSHUFB m128 xmm xmm +// VPSHUFB ymm ymm ymm +// VPSHUFB m256 ymm ymm +func VPSHUFB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSHUFB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSHUFB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSHUFB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSHUFB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSHUFB: bad operands") +} + +// VPSHUFD: Shuffle Packed Doublewords. +// +// Forms: +// +// VPSHUFD imm8 xmm xmm +// VPSHUFD imm8 m128 xmm +// VPSHUFD imm8 ymm ymm +// VPSHUFD imm8 m256 ymm +func VPSHUFD(i, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPSHUFD: bad operands") +} + +// VPSHUFHW: Shuffle Packed High Words. 
+// +// Forms: +// +// VPSHUFHW imm8 xmm xmm +// VPSHUFHW imm8 m128 xmm +// VPSHUFHW imm8 ymm ymm +// VPSHUFHW imm8 m256 ymm +func VPSHUFHW(i, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFHW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFHW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFHW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFHW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPSHUFHW: bad operands") +} + +// VPSHUFLW: Shuffle Packed Low Words. +// +// Forms: +// +// VPSHUFLW imm8 xmm xmm +// VPSHUFLW imm8 m128 xmm +// VPSHUFLW imm8 ymm ymm +// VPSHUFLW imm8 m256 ymm +func VPSHUFLW(i, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFLW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFLW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFLW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPSHUFLW", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VPSHUFLW: bad operands") +} + +// VPSIGNB: Packed Sign of Byte Integers. 
+// +// Forms: +// +// VPSIGNB xmm xmm xmm +// VPSIGNB m128 xmm xmm +// VPSIGNB ymm ymm ymm +// VPSIGNB m256 ymm ymm +func VPSIGNB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSIGNB: bad operands") +} + +// VPSIGND: Packed Sign of Doubleword Integers. +// +// Forms: +// +// VPSIGND xmm xmm xmm +// VPSIGND m128 xmm xmm +// VPSIGND ymm ymm ymm +// VPSIGND m256 ymm ymm +func VPSIGND(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGND", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSIGND: bad operands") +} + +// VPSIGNW: Packed Sign of Word Integers. 
+// +// Forms: +// +// VPSIGNW xmm xmm xmm +// VPSIGNW m128 xmm xmm +// VPSIGNW ymm ymm ymm +// VPSIGNW m256 ymm ymm +func VPSIGNW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSIGNW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSIGNW: bad operands") +} + +// VPSLLD: Shift Packed Doubleword Data Left Logical. +// +// Forms: +// +// VPSLLD imm8 xmm xmm +// VPSLLD xmm xmm xmm +// VPSLLD m128 xmm xmm +// VPSLLD imm8 ymm ymm +// VPSLLD xmm ymm ymm +// VPSLLD m128 ymm ymm +func VPSLLD(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSLLD: bad operands") +} + +// VPSLLDQ: Shift Packed Double Quadword Left Logical. 
+// +// Forms: +// +// VPSLLDQ imm8 xmm xmm +// VPSLLDQ imm8 ymm ymm +func VPSLLDQ(i, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLDQ", + Operands: []operand.Op{i, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLDQ", + Operands: []operand.Op{i, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSLLDQ: bad operands") +} + +// VPSLLQ: Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// VPSLLQ imm8 xmm xmm +// VPSLLQ xmm xmm xmm +// VPSLLQ m128 xmm xmm +// VPSLLQ imm8 ymm ymm +// VPSLLQ xmm ymm ymm +// VPSLLQ m128 ymm ymm +func VPSLLQ(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSLLQ: bad operands") +} + +// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical. 
+// +// Forms: +// +// VPSLLVD xmm xmm xmm +// VPSLLVD m128 xmm xmm +// VPSLLVD ymm ymm ymm +// VPSLLVD m256 ymm ymm +func VPSLLVD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSLLVD: bad operands") +} + +// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical. +// +// Forms: +// +// VPSLLVQ xmm xmm xmm +// VPSLLVQ m128 xmm xmm +// VPSLLVQ ymm ymm ymm +// VPSLLVQ m256 ymm ymm +func VPSLLVQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSLLVQ: bad operands") +} + +// VPSLLW: Shift Packed Word Data Left Logical. 
+// +// Forms: +// +// VPSLLW imm8 xmm xmm +// VPSLLW xmm xmm xmm +// VPSLLW m128 xmm xmm +// VPSLLW imm8 ymm ymm +// VPSLLW xmm ymm ymm +// VPSLLW m128 ymm ymm +func VPSLLW(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSLLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSLLW: bad operands") +} + +// VPSRAD: Shift Packed Doubleword Data Right Arithmetic. +// +// Forms: +// +// VPSRAD imm8 xmm xmm +// VPSRAD xmm xmm xmm +// VPSRAD m128 xmm xmm +// VPSRAD imm8 ymm ymm +// VPSRAD xmm ymm ymm +// VPSRAD m128 ymm ymm +func VPSRAD(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRAD: bad operands") +} + +// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic. 
+// +// Forms: +// +// VPSRAVD xmm xmm xmm +// VPSRAVD m128 xmm xmm +// VPSRAVD ymm ymm ymm +// VPSRAVD m256 ymm ymm +func VPSRAVD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRAVD: bad operands") +} + +// VPSRAW: Shift Packed Word Data Right Arithmetic. +// +// Forms: +// +// VPSRAW imm8 xmm xmm +// VPSRAW xmm xmm xmm +// VPSRAW m128 xmm xmm +// VPSRAW imm8 ymm ymm +// VPSRAW xmm ymm ymm +// VPSRAW m128 ymm ymm +func VPSRAW(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRAW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRAW: bad operands") +} + +// VPSRLD: Shift Packed Doubleword Data Right Logical. 
+// +// Forms: +// +// VPSRLD imm8 xmm xmm +// VPSRLD xmm xmm xmm +// VPSRLD m128 xmm xmm +// VPSRLD imm8 ymm ymm +// VPSRLD xmm ymm ymm +// VPSRLD m128 ymm ymm +func VPSRLD(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLD", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRLD: bad operands") +} + +// VPSRLDQ: Shift Packed Double Quadword Right Logical. +// +// Forms: +// +// VPSRLDQ imm8 xmm xmm +// VPSRLDQ imm8 ymm ymm +func VPSRLDQ(i, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLDQ", + Operands: []operand.Op{i, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLDQ", + Operands: []operand.Op{i, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRLDQ: bad operands") +} + +// VPSRLQ: Shift Packed Quadword Data Right Logical. 
+// +// Forms: +// +// VPSRLQ imm8 xmm xmm +// VPSRLQ xmm xmm xmm +// VPSRLQ m128 xmm xmm +// VPSRLQ imm8 ymm ymm +// VPSRLQ xmm ymm ymm +// VPSRLQ m128 ymm ymm +func VPSRLQ(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLQ", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRLQ: bad operands") +} + +// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical. +// +// Forms: +// +// VPSRLVD xmm xmm xmm +// VPSRLVD m128 xmm xmm +// VPSRLVD ymm ymm ymm +// VPSRLVD m256 ymm ymm +func VPSRLVD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRLVD: bad operands") +} + +// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical. 
+// +// Forms: +// +// VPSRLVQ xmm xmm xmm +// VPSRLVQ m128 xmm xmm +// VPSRLVQ ymm ymm ymm +// VPSRLVQ m256 ymm ymm +func VPSRLVQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLVQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRLVQ: bad operands") +} + +// VPSRLW: Shift Packed Word Data Right Logical. +// +// Forms: +// +// VPSRLW imm8 xmm xmm +// VPSRLW xmm xmm xmm +// VPSRLW m128 xmm xmm +// VPSRLW imm8 ymm ymm +// VPSRLW xmm ymm ymm +// VPSRLW m128 ymm ymm +func VPSRLW(imx, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsXMM(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(imx) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSRLW", + Operands: []operand.Op{imx, xy, xy1}, + Inputs: []operand.Op{imx, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSRLW: bad operands") +} + +// VPSUBB: Subtract Packed Byte Integers. 
+// +// Forms: +// +// VPSUBB xmm xmm xmm +// VPSUBB m128 xmm xmm +// VPSUBB ymm ymm ymm +// VPSUBB m256 ymm ymm +func VPSUBB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBB: bad operands") +} + +// VPSUBD: Subtract Packed Doubleword Integers. +// +// Forms: +// +// VPSUBD xmm xmm xmm +// VPSUBD m128 xmm xmm +// VPSUBD ymm ymm ymm +// VPSUBD m256 ymm ymm +func VPSUBD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBD: bad operands") +} + +// VPSUBQ: Subtract Packed Quadword Integers. 
+// +// Forms: +// +// VPSUBQ xmm xmm xmm +// VPSUBQ m128 xmm xmm +// VPSUBQ ymm ymm ymm +// VPSUBQ m256 ymm ymm +func VPSUBQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBQ: bad operands") +} + +// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation. +// +// Forms: +// +// VPSUBSB xmm xmm xmm +// VPSUBSB m128 xmm xmm +// VPSUBSB ymm ymm ymm +// VPSUBSB m256 ymm ymm +func VPSUBSB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBSB: bad operands") +} + +// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation. 
+// +// Forms: +// +// VPSUBSW xmm xmm xmm +// VPSUBSW m128 xmm xmm +// VPSUBSW ymm ymm ymm +// VPSUBSW m256 ymm ymm +func VPSUBSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBSW: bad operands") +} + +// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation. +// +// Forms: +// +// VPSUBUSB xmm xmm xmm +// VPSUBUSB m128 xmm xmm +// VPSUBUSB ymm ymm ymm +// VPSUBUSB m256 ymm ymm +func VPSUBUSB(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSB", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBUSB: bad operands") +} + +// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation. 
+// +// Forms: +// +// VPSUBUSW xmm xmm xmm +// VPSUBUSW m128 xmm xmm +// VPSUBUSW ymm ymm ymm +// VPSUBUSW m256 ymm ymm +func VPSUBUSW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBUSW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBUSW: bad operands") +} + +// VPSUBW: Subtract Packed Word Integers. +// +// Forms: +// +// VPSUBW xmm xmm xmm +// VPSUBW m128 xmm xmm +// VPSUBW ymm ymm ymm +// VPSUBW m256 ymm ymm +func VPSUBW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPSUBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPSUBW: bad operands") +} + +// VPTEST: Packed Logical Compare. 
+// +// Forms: +// +// VPTEST xmm xmm +// VPTEST m128 xmm +// VPTEST ymm ymm +// VPTEST m256 ymm +func VPTEST(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPTEST", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VPTEST", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPTEST", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VPTEST", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VPTEST: bad operands") +} + +// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words. +// +// Forms: +// +// VPUNPCKHBW xmm xmm xmm +// VPUNPCKHBW m128 xmm xmm +// VPUNPCKHBW ymm ymm ymm +// VPUNPCKHBW m256 ymm ymm +func VPUNPCKHBW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKHBW: bad operands") +} + +// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords. 
+// +// Forms: +// +// VPUNPCKHDQ xmm xmm xmm +// VPUNPCKHDQ m128 xmm xmm +// VPUNPCKHDQ ymm ymm ymm +// VPUNPCKHDQ m256 ymm ymm +func VPUNPCKHDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKHDQ: bad operands") +} + +// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords. +// +// Forms: +// +// VPUNPCKHQDQ xmm xmm xmm +// VPUNPCKHQDQ m128 xmm xmm +// VPUNPCKHQDQ ymm ymm ymm +// VPUNPCKHQDQ m256 ymm ymm +func VPUNPCKHQDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKHQDQ: bad operands") +} + +// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords. 
+// +// Forms: +// +// VPUNPCKHWD xmm xmm xmm +// VPUNPCKHWD m128 xmm xmm +// VPUNPCKHWD ymm ymm ymm +// VPUNPCKHWD m256 ymm ymm +func VPUNPCKHWD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKHWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKHWD: bad operands") +} + +// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words. +// +// Forms: +// +// VPUNPCKLBW xmm xmm xmm +// VPUNPCKLBW m128 xmm xmm +// VPUNPCKLBW ymm ymm ymm +// VPUNPCKLBW m256 ymm ymm +func VPUNPCKLBW(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLBW", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKLBW: bad operands") +} + +// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords. 
+// +// Forms: +// +// VPUNPCKLDQ xmm xmm xmm +// VPUNPCKLDQ m128 xmm xmm +// VPUNPCKLDQ ymm ymm ymm +// VPUNPCKLDQ m256 ymm ymm +func VPUNPCKLDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKLDQ: bad operands") +} + +// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords. +// +// Forms: +// +// VPUNPCKLQDQ xmm xmm xmm +// VPUNPCKLQDQ m128 xmm xmm +// VPUNPCKLQDQ ymm ymm ymm +// VPUNPCKLQDQ m256 ymm ymm +func VPUNPCKLQDQ(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLQDQ", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKLQDQ: bad operands") +} + +// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords. 
+// +// Forms: +// +// VPUNPCKLWD xmm xmm xmm +// VPUNPCKLWD m128 xmm xmm +// VPUNPCKLWD ymm ymm ymm +// VPUNPCKLWD m256 ymm ymm +func VPUNPCKLWD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPUNPCKLWD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPUNPCKLWD: bad operands") +} + +// VPXOR: Packed Bitwise Logical Exclusive OR. +// +// Forms: +// +// VPXOR xmm xmm xmm +// VPXOR m128 xmm xmm +// VPXOR ymm ymm ymm +// VPXOR m256 ymm ymm +func VPXOR(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPXOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VPXOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPXOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VPXOR", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VPXOR: bad operands") +} + +// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VRCPPS xmm xmm +// VRCPPS m128 xmm +// VRCPPS ymm ymm +// VRCPPS m256 ymm +func VRCPPS(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VRCPPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VRCPPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VRCPPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VRCPPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VRCPPS: bad operands") +} + +// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values. +// +// Forms: +// +// VRCPSS xmm xmm xmm +// VRCPSS m32 xmm xmm +func VRCPSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VRCPSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VRCPSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VRCPSS: bad operands") +} + +// VROUNDPD: Round Packed Double Precision Floating-Point Values. +// +// Forms: +// +// VROUNDPD imm8 xmm xmm +// VROUNDPD imm8 m128 xmm +// VROUNDPD imm8 ymm ymm +// VROUNDPD imm8 m256 ymm +func VROUNDPD(i, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPD", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VROUNDPD: bad operands") +} + +// VROUNDPS: Round Packed Single Precision Floating-Point Values. 
+// +// Forms: +// +// VROUNDPS imm8 xmm xmm +// VROUNDPS imm8 m128 xmm +// VROUNDPS imm8 ymm ymm +// VROUNDPS imm8 m256 ymm +func VROUNDPS(i, mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPS", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPS", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPS", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VROUNDPS", + Operands: []operand.Op{i, mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VROUNDPS: bad operands") +} + +// VROUNDSD: Round Scalar Double Precision Floating-Point Values. +// +// Forms: +// +// VROUNDSD imm8 xmm xmm xmm +// VROUNDSD imm8 m64 xmm xmm +func VROUNDSD(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VROUNDSD", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VROUNDSD", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VROUNDSD: bad operands") +} + +// VROUNDSS: Round Scalar Single Precision Floating-Point Values. +// +// Forms: +// +// VROUNDSS imm8 xmm xmm xmm +// VROUNDSS imm8 m32 xmm xmm +func VROUNDSS(i, mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VROUNDSS", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsIMM8(i) && operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VROUNDSS", + Operands: []operand.Op{i, mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VROUNDSS: bad operands") +} + +// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VRSQRTPS xmm xmm +// VRSQRTPS m128 xmm +// VRSQRTPS ymm ymm +// VRSQRTPS m256 ymm +func VRSQRTPS(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VRSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VRSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VRSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VRSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VRSQRTPS: bad operands") +} + +// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VRSQRTSS xmm xmm xmm +// VRSQRTSS m32 xmm xmm +func VRSQRTSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VRSQRTSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VRSQRTSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VRSQRTSS: bad operands") +} + +// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VSHUFPD imm8 xmm xmm xmm +// VSHUFPD imm8 m128 xmm xmm +// VSHUFPD imm8 ymm ymm ymm +// VSHUFPD imm8 m256 ymm ymm +func VSHUFPD(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPD", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VSHUFPD: bad operands") +} + +// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VSHUFPS imm8 xmm xmm xmm +// VSHUFPS imm8 m128 xmm xmm +// VSHUFPS imm8 ymm ymm ymm +// VSHUFPS imm8 m256 ymm ymm +func VSHUFPS(i, mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(i) && operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsIMM8(i) && operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSHUFPS", + Operands: []operand.Op{i, mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VSHUFPS: bad operands") +} + +// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VSQRTPD xmm xmm +// VSQRTPD m128 xmm +// VSQRTPD ymm ymm +// VSQRTPD m256 ymm +func VSQRTPD(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VSQRTPD: bad operands") +} + +// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VSQRTPS xmm xmm +// VSQRTPS m128 xmm +// VSQRTPS ymm ymm +// VSQRTPS m256 ymm +func VSQRTPS(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VSQRTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy}, + Outputs: []operand.Op{xy}, + }, nil + } + return nil, errors.New("VSQRTPS: bad operands") +} + +// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value. +// +// Forms: +// +// VSQRTSD xmm xmm xmm +// VSQRTSD m64 xmm xmm +func VSQRTSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSQRTSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSQRTSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VSQRTSD: bad operands") +} + +// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value. +// +// Forms: +// +// VSQRTSS xmm xmm xmm +// VSQRTSS m32 xmm xmm +func VSQRTSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSQRTSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSQRTSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VSQRTSS: bad operands") +} + +// VSTMXCSR: Store MXCSR Register State. +// +// Forms: +// +// VSTMXCSR m32 +func VSTMXCSR(m operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsM32(m): + return &intrep.Instruction{ + Opcode: "VSTMXCSR", + Operands: []operand.Op{m}, + Inputs: []operand.Op{}, + Outputs: []operand.Op{m}, + }, nil + } + return nil, errors.New("VSTMXCSR: bad operands") +} + +// VSUBPD: Subtract Packed Double-Precision Floating-Point Values. 
+// +// Forms: +// +// VSUBPD xmm xmm xmm +// VSUBPD m128 xmm xmm +// VSUBPD ymm ymm ymm +// VSUBPD m256 ymm ymm +func VSUBPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VSUBPD: bad operands") +} + +// VSUBPS: Subtract Packed Single-Precision Floating-Point Values. +// +// Forms: +// +// VSUBPS xmm xmm xmm +// VSUBPS m128 xmm xmm +// VSUBPS ymm ymm ymm +// VSUBPS m256 ymm ymm +func VSUBPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VSUBPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VSUBPS: bad operands") +} + +// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values. +// +// Forms: +// +// VSUBSD xmm xmm xmm +// VSUBSD m64 xmm xmm +func VSUBSD(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSUBSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSUBSD", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VSUBSD: bad operands") +} + +// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VSUBSS xmm xmm xmm +// VSUBSS m32 xmm xmm +func VSUBSS(mx, x, x1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSUBSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x) && operand.IsXMM(x1): + return &intrep.Instruction{ + Opcode: "VSUBSS", + Operands: []operand.Op{mx, x, x1}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x1}, + }, nil + } + return nil, errors.New("VSUBSS: bad operands") +} + +// VTESTPD: Packed Double-Precision Floating-Point Bit Test. +// +// Forms: +// +// VTESTPD xmm xmm +// VTESTPD m128 xmm +// VTESTPD ymm ymm +// VTESTPD m256 ymm +func VTESTPD(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPD", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VTESTPD: bad operands") +} + +// VTESTPS: Packed Single-Precision Floating-Point Bit Test. +// +// Forms: +// +// VTESTPS xmm xmm +// VTESTPS m128 xmm +// VTESTPS ymm ymm +// VTESTPS m256 ymm +func VTESTPS(mxy, xy operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy): + return &intrep.Instruction{ + Opcode: "VTESTPS", + Operands: []operand.Op{mxy, xy}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VTESTPS: bad operands") +} + +// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS. 
+// +// Forms: +// +// VUCOMISD xmm xmm +// VUCOMISD m64 xmm +func VUCOMISD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VUCOMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM64(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VUCOMISD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VUCOMISD: bad operands") +} + +// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS. +// +// Forms: +// +// VUCOMISS xmm xmm +// VUCOMISS m32 xmm +func VUCOMISS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VUCOMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + case operand.IsM32(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "VUCOMISS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{}, + }, nil + } + return nil, errors.New("VUCOMISS: bad operands") +} + +// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKHPD xmm xmm xmm +// VUNPCKHPD m128 xmm xmm +// VUNPCKHPD ymm ymm ymm +// VUNPCKHPD m256 ymm ymm +func VUNPCKHPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VUNPCKHPD: bad operands") +} + +// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VUNPCKHPS xmm xmm xmm +// VUNPCKHPS m128 xmm xmm +// VUNPCKHPS ymm ymm ymm +// VUNPCKHPS m256 ymm ymm +func VUNPCKHPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKHPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VUNPCKHPS: bad operands") +} + +// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values. +// +// Forms: +// +// VUNPCKLPD xmm xmm xmm +// VUNPCKLPD m128 xmm xmm +// VUNPCKLPD ymm ymm ymm +// VUNPCKLPD m256 ymm ymm +func VUNPCKLPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VUNPCKLPD: bad operands") +} + +// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VUNPCKLPS xmm xmm xmm +// VUNPCKLPS m128 xmm xmm +// VUNPCKLPS ymm ymm ymm +// VUNPCKLPS m256 ymm ymm +func VUNPCKLPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VUNPCKLPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VUNPCKLPS: bad operands") +} + +// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values. +// +// Forms: +// +// VXORPD xmm xmm xmm +// VXORPD m128 xmm xmm +// VXORPD ymm ymm ymm +// VXORPD m256 ymm ymm +func VXORPD(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPD", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VXORPD: bad operands") +} + +// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values. 
+// +// Forms: +// +// VXORPS xmm xmm xmm +// VXORPS m128 xmm xmm +// VXORPS ymm ymm ymm +// VXORPS m256 ymm ymm +func VXORPS(mxy, xy, xy1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM128(mxy) && operand.IsXMM(xy) && operand.IsXMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsYMM(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + case operand.IsM256(mxy) && operand.IsYMM(xy) && operand.IsYMM(xy1): + return &intrep.Instruction{ + Opcode: "VXORPS", + Operands: []operand.Op{mxy, xy, xy1}, + Inputs: []operand.Op{mxy, xy}, + Outputs: []operand.Op{xy1}, + }, nil + } + return nil, errors.New("VXORPS: bad operands") +} + +// VZEROALL: Zero All YMM Registers. +// +// Forms: +// +// VZEROALL +func VZEROALL() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "VZEROALL", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// VZEROUPPER: Zero Upper Bits of YMM Registers. +// +// Forms: +// +// VZEROUPPER +func VZEROUPPER() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "VZEROUPPER", + Operands: nil, + Inputs: []operand.Op{}, + Outputs: []operand.Op{}, + }, nil +} + +// XADDB: Exchange and Add. +// +// Forms: +// +// XADDB r8 r8 +// XADDB r8 m8 +func XADDB(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(r) && operand.IsR8(mr): + return &intrep.Instruction{ + Opcode: "XADDB", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + case operand.IsR8(r) && operand.IsM8(mr): + return &intrep.Instruction{ + Opcode: "XADDB", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + } + return nil, errors.New("XADDB: bad operands") +} + +// XADDL: Exchange and Add. +// +// Forms: +// +// XADDL r32 r32 +// XADDL r32 m32 +func XADDL(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(r) && operand.IsR32(mr): + return &intrep.Instruction{ + Opcode: "XADDL", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + case operand.IsR32(r) && operand.IsM32(mr): + return &intrep.Instruction{ + Opcode: "XADDL", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + } + return nil, errors.New("XADDL: bad operands") +} + +// XADDQ: Exchange and Add. 
+// +// Forms: +// +// XADDQ r64 r64 +// XADDQ r64 m64 +func XADDQ(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(r) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "XADDQ", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + case operand.IsR64(r) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "XADDQ", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + } + return nil, errors.New("XADDQ: bad operands") +} + +// XADDW: Exchange and Add. +// +// Forms: +// +// XADDW r16 r16 +// XADDW r16 m16 +func XADDW(r, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(r) && operand.IsR16(mr): + return &intrep.Instruction{ + Opcode: "XADDW", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + case operand.IsR16(r) && operand.IsM16(mr): + return &intrep.Instruction{ + Opcode: "XADDW", + Operands: []operand.Op{r, mr}, + Inputs: []operand.Op{r, mr}, + Outputs: []operand.Op{r, mr}, + }, nil + } + return nil, errors.New("XADDW: bad operands") +} + +// XCHGB: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGB r8 r8 +// XCHGB m8 r8 +// XCHGB r8 m8 +func XCHGB(mr, mr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR8(mr) && operand.IsR8(mr1): + return &intrep.Instruction{ + Opcode: "XCHGB", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + case operand.IsM8(mr) && operand.IsR8(mr1): + return &intrep.Instruction{ + Opcode: "XCHGB", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + case operand.IsR8(mr) && operand.IsM8(mr1): + return &intrep.Instruction{ + Opcode: "XCHGB", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + } + return nil, errors.New("XCHGB: bad operands") +} + +// XCHGL: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGL r32 eax +// XCHGL eax r32 +// XCHGL r32 r32 +// XCHGL m32 r32 +// XCHGL r32 m32 +func XCHGL(emr, emr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR32(emr) && operand.IsEAX(emr1): + return &intrep.Instruction{ + Opcode: "XCHGL", + Operands: []operand.Op{emr, emr1}, + Inputs: []operand.Op{emr, emr1}, + Outputs: []operand.Op{emr, emr1}, + }, nil + case operand.IsEAX(emr) && operand.IsR32(emr1): + return &intrep.Instruction{ + Opcode: "XCHGL", + Operands: []operand.Op{emr, emr1}, + Inputs: []operand.Op{emr, emr1}, + Outputs: []operand.Op{emr, emr1}, + }, nil + case operand.IsR32(emr) && operand.IsR32(emr1): + return &intrep.Instruction{ + Opcode: "XCHGL", + Operands: []operand.Op{emr, emr1}, + Inputs: []operand.Op{emr, emr1}, + Outputs: []operand.Op{emr, emr1}, + }, nil + case operand.IsM32(emr) && operand.IsR32(emr1): + return &intrep.Instruction{ + Opcode: "XCHGL", + Operands: []operand.Op{emr, emr1}, + Inputs: []operand.Op{emr, emr1}, + Outputs: []operand.Op{emr, emr1}, + }, nil + case operand.IsR32(emr) && operand.IsM32(emr1): + return &intrep.Instruction{ + Opcode: "XCHGL", + Operands: []operand.Op{emr, emr1}, + Inputs: []operand.Op{emr, emr1}, + Outputs: []operand.Op{emr, emr1}, + }, nil + } + return nil, errors.New("XCHGL: bad operands") +} + +// XCHGQ: Exchange Register/Memory with Register. 
+// +// Forms: +// +// XCHGQ r64 rax +// XCHGQ rax r64 +// XCHGQ r64 r64 +// XCHGQ m64 r64 +// XCHGQ r64 m64 +func XCHGQ(mr, mr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR64(mr) && operand.IsRAX(mr1): + return &intrep.Instruction{ + Opcode: "XCHGQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + case operand.IsRAX(mr) && operand.IsR64(mr1): + return &intrep.Instruction{ + Opcode: "XCHGQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + case operand.IsR64(mr) && operand.IsR64(mr1): + return &intrep.Instruction{ + Opcode: "XCHGQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + case operand.IsM64(mr) && operand.IsR64(mr1): + return &intrep.Instruction{ + Opcode: "XCHGQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + case operand.IsR64(mr) && operand.IsM64(mr1): + return &intrep.Instruction{ + Opcode: "XCHGQ", + Operands: []operand.Op{mr, mr1}, + Inputs: []operand.Op{mr, mr1}, + Outputs: []operand.Op{mr, mr1}, + }, nil + } + return nil, errors.New("XCHGQ: bad operands") +} + +// XCHGW: Exchange Register/Memory with Register. +// +// Forms: +// +// XCHGW r16 ax +// XCHGW ax r16 +// XCHGW r16 r16 +// XCHGW m16 r16 +// XCHGW r16 m16 +func XCHGW(amr, amr1 operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsR16(amr) && operand.IsAX(amr1): + return &intrep.Instruction{ + Opcode: "XCHGW", + Operands: []operand.Op{amr, amr1}, + Inputs: []operand.Op{amr, amr1}, + Outputs: []operand.Op{amr, amr1}, + }, nil + case operand.IsAX(amr) && operand.IsR16(amr1): + return &intrep.Instruction{ + Opcode: "XCHGW", + Operands: []operand.Op{amr, amr1}, + Inputs: []operand.Op{amr, amr1}, + Outputs: []operand.Op{amr, amr1}, + }, nil + case operand.IsR16(amr) && operand.IsR16(amr1): + return &intrep.Instruction{ + Opcode: "XCHGW", + Operands: []operand.Op{amr, amr1}, + Inputs: []operand.Op{amr, amr1}, + Outputs: []operand.Op{amr, amr1}, + }, nil + case operand.IsM16(amr) && operand.IsR16(amr1): + return &intrep.Instruction{ + Opcode: "XCHGW", + Operands: []operand.Op{amr, amr1}, + Inputs: []operand.Op{amr, amr1}, + Outputs: []operand.Op{amr, amr1}, + }, nil + case operand.IsR16(amr) && operand.IsM16(amr1): + return &intrep.Instruction{ + Opcode: "XCHGW", + Operands: []operand.Op{amr, amr1}, + Inputs: []operand.Op{amr, amr1}, + Outputs: []operand.Op{amr, amr1}, + }, nil + } + return nil, errors.New("XCHGW: bad operands") +} + +// XGETBV: Get Value of Extended Control Register. +// +// Forms: +// +// XGETBV +func XGETBV() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "XGETBV", + Operands: nil, + Inputs: []operand.Op{reg.ECX}, + Outputs: []operand.Op{reg.EAX, reg.EDX}, + }, nil +} + +// XLAT: Table Look-up Translation. +// +// Forms: +// +// XLAT +func XLAT() (*intrep.Instruction, error) { + return &intrep.Instruction{ + Opcode: "XLAT", + Operands: nil, + Inputs: []operand.Op{reg.AL, reg.EBX}, + Outputs: []operand.Op{reg.AL}, + }, nil +} + +// XORB: Logical Exclusive OR. 
+// +// Forms: +// +// XORB imm8 al +// XORB imm8 r8 +// XORB r8 r8 +// XORB m8 r8 +// XORB imm8 m8 +// XORB r8 m8 +func XORB(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM8(imr) && operand.IsAL(amr): + return &intrep.Instruction{ + Opcode: "XORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "XORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "XORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM8(imr) && operand.IsR8(amr): + return &intrep.Instruction{ + Opcode: "XORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "XORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR8(imr) && operand.IsM8(amr): + return &intrep.Instruction{ + Opcode: "XORB", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("XORB: bad operands") +} + +// XORL: Logical Exclusive OR. +// +// Forms: +// +// XORL imm32 eax +// XORL imm8 r32 +// XORL imm32 r32 +// XORL r32 r32 +// XORL m32 r32 +// XORL imm8 m32 +// XORL imm32 m32 +// XORL r32 m32 +func XORL(imr, emr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsEAX(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsM32(imr) && operand.IsR32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{emr}, + Outputs: []operand.Op{emr}, + }, nil + case operand.IsR32(imr) && operand.IsM32(emr): + return &intrep.Instruction{ + Opcode: "XORL", + Operands: []operand.Op{imr, emr}, + Inputs: []operand.Op{imr, emr}, + Outputs: []operand.Op{emr}, + }, nil + } + return nil, errors.New("XORL: bad operands") +} + +// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values. 
+// +// Forms: +// +// XORPD xmm xmm +// XORPD m128 xmm +func XORPD(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "XORPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "XORPD", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("XORPD: bad operands") +} + +// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values. +// +// Forms: +// +// XORPS xmm xmm +// XORPS m128 xmm +func XORPS(mx, x operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsXMM(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "XORPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + case operand.IsM128(mx) && operand.IsXMM(x): + return &intrep.Instruction{ + Opcode: "XORPS", + Operands: []operand.Op{mx, x}, + Inputs: []operand.Op{mx, x}, + Outputs: []operand.Op{x}, + }, nil + } + return nil, errors.New("XORPS: bad operands") +} + +// XORQ: Logical Exclusive OR. +// +// Forms: +// +// XORQ imm32 rax +// XORQ imm8 r64 +// XORQ imm32 r64 +// XORQ r64 r64 +// XORQ m64 r64 +// XORQ imm8 m64 +// XORQ imm32 m64 +// XORQ r64 m64 +func XORQ(imr, mr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM32(imr) && operand.IsRAX(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsM64(imr) && operand.IsR64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsIMM32(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{mr}, + Outputs: []operand.Op{mr}, + }, nil + case operand.IsR64(imr) && operand.IsM64(mr): + return &intrep.Instruction{ + Opcode: "XORQ", + Operands: []operand.Op{imr, mr}, + Inputs: []operand.Op{imr, mr}, + Outputs: []operand.Op{mr}, + }, nil + } + return nil, errors.New("XORQ: bad operands") +} + +// XORW: Logical Exclusive OR. 
+// +// Forms: +// +// XORW imm16 ax +// XORW imm8 r16 +// XORW imm16 r16 +// XORW r16 r16 +// XORW m16 r16 +// XORW imm8 m16 +// XORW imm16 m16 +// XORW r16 m16 +func XORW(imr, amr operand.Op) (*intrep.Instruction, error) { + switch { + case operand.IsIMM16(imr) && operand.IsAX(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsM16(imr) && operand.IsR16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM8(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsIMM16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{amr}, + Outputs: []operand.Op{amr}, + }, nil + case operand.IsR16(imr) && operand.IsM16(amr): + return &intrep.Instruction{ + Opcode: "XORW", + Operands: []operand.Op{imr, amr}, + Inputs: []operand.Op{imr, amr}, + Outputs: []operand.Op{amr}, + }, nil + } + return nil, errors.New("XORW: bad operands") +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000..685f0e7e --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. 
+func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. 
+func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000..c646a695 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000..9bf4286c --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
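// exampleHistogram is a hypothetical helper sketching how the power-of-two
// bucketing above behaves; every call in it is defined in this file, but the
// helper and the sample values are purely illustrative.
func exampleHistogram() {
	h := new(histogram)
	h.addMeasurement(1)   // bucket 0 covers the values 0-1
	h.addMeasurement(3)   // bucket 1 covers 2-3
	h.addMeasurement(300) // bucket 8 covers 256-511
	_ = h.total()                 // 3 observations
	_ = h.average()               // (1+3+300)/3 ≈ 101.3
	_ = h.median()                // estimated from bucket boundaries, not exact
	_ = h.percentileBoundary(0.9) // likewise an estimate
}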
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000..f00d869f --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: "/debug/requests"}}) + if pat != "" { + panic("/debug/requests is already registered. 
You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) 
+} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. 
See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. 
+func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
+ go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 00000000..d6081911 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 00000000..df6e1fba --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cmd/getgo/LICENSE b/vendor/golang.org/x/tools/cmd/getgo/LICENSE new file mode 100644 index 00000000..32017f8f --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/getgo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000..6b7052b8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). 
+ children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return 
"return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000..3e4b1953 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,481 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. 
+ seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. 
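+//
+// A minimal usage sketch (fset and f are assumed to come from go/parser):
+//
+//	if astutil.DeleteImport(fset, f, "log") {
+//		// the "log" import spec, and its decl if now empty, was removed
+//	}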
+func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
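+//
+// A minimal usage sketch (fset and f are assumed to come from go/parser); each
+// inner slice is one blank-line-separated group:
+//
+//	for _, group := range astutil.Imports(fset, f) {
+//		for _, spec := range group {
+//			fmt.Println(spec.Path.Value)
+//		}
+//		fmt.Println() // group boundary
+//	}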
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000..cf72ea99 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,477 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
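+// A minimal usage sketch of Apply with a Cursor (f is assumed to be an
+// *ast.File obtained from go/parser): rename every identifier "foo" to "bar"
+// in a post-order pass:
+//
+//	astutil.Apply(f, nil, func(c *astutil.Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
+//			c.Replace(&ast.Ident{Name: "bar", NamePos: id.NamePos})
+//		}
+//		return true
+//	})
+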
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + 
a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000..76306298 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 00000000..98b3987b --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. 
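+//
+// A minimal reading sketch (error handling elided; the filename and path come
+// from Find above):
+//
+//	filename, path := gcexportdata.Find("fmt", "")
+//	f, _ := os.Open(filename)
+//	r, _ := gcexportdata.NewReader(f)
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	pkg, _ := gcexportdata.Read(r, fset, imports, path)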
+func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + b, err := gcimporter.BExportData(fset, pkg) + if err != nil { + return err + } + _, err = out.Write(b) + return err +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 00000000..efe221e7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. 
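+//
+// A minimal type-checking sketch (files is assumed to be the parsed
+// []*ast.File of the package under analysis):
+//
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
+//	pkg, err := conf.Check("example.org/p", fset, files, nil)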
+// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go new file mode 100644 index 00000000..2713dce6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/main.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// The gcexportdata command is a diagnostic tool that displays the +// contents of gc export data files. +package main + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" + "os" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/types/typeutil" +) + +var packageFlag = flag.String("package", "", "alternative package to print") + +func main() { + log.SetPrefix("gcexportdata: ") + log.SetFlags(0) + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") + } + flag.Parse() + if flag.NArg() != 1 { + flag.Usage() + os.Exit(2) + } + filename := flag.Args()[0] + + f, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Decode the package. + const primary = "" + imports := make(map[string]*types.Package) + fset := token.NewFileSet() + pkg, err := gcexportdata.Read(r, fset, imports, primary) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Optionally select an indirectly mentioned package. + if *packageFlag != "" { + pkg = imports[*packageFlag] + if pkg == nil { + fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", + filename, *packageFlag) + for p := range imports { + if p != primary { + fmt.Fprintf(os.Stderr, "\t%s\n", p) + } + } + os.Exit(1) + } + } + + // Print all package-level declarations, including non-exported ones. + fmt.Printf("package %s\n", pkg.Name()) + for _, imp := range pkg.Imports() { + fmt.Printf("import %q\n", imp.Path()) + } + qual := func(p *types.Package) string { + if pkg == p { + return "" + } + return p.Name() + } + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + fmt.Printf("%s: %s\n", + fset.Position(obj.Pos()), + types.ObjectString(obj, qual)) + + // For types, print each method. 
+ if _, ok := obj.(*types.TypeName); ok { + for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { + fmt.Printf("%s: %s\n", + fset.Position(method.Obj().Pos()), + types.SelectionString(method, qual)) + } + } + } +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 00000000..a807d0aa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). 
+// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + 
p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". + n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. 
+ + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). + var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. 
+ p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // 
float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. 
+// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". ") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 00000000..e3c31078 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1036 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. +func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line int) token.Pos { + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. 
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + if predecl == nil { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + } + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 00000000..f33dc561 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. 
The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 00000000..9cf18660 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
+// +func (p *parser) parsePackageId() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageId() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . +// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . 
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return types.NewInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageId(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
+// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. + var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 00000000..be671c79 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,723 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +// +build go1.11 + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// IExportData returns the binary export data for pkg. +// If no file set is provided, position info will be missing. +func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. 
+ panic(e) + } + }() + + p := iexporter{ + out: bytes.NewBuffer(nil), + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex, pkg) + w.flush() + + // Assemble header. + var hdr intWriter + hdr.WriteByte('i') + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(p.out, &hdr) + io.Copy(p.out, &p.strings) + io.Copy(p.out, &p.data0) + + return p.out.Bytes(), nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + pkgObjs[localpkg] = nil + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return pkgs[i].Path() < pkgs[j].Path() + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(pkg.Path()) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. 
+ assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. + w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(pkg.Path()) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. 
+ w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Embedded()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), 
typ) + case constant.String: + w.string(constant.StringVal(v)) + case constant.Unknown: + // package contains type errors + default: + panic(internalErrorf("unexpected value %v (%T)", v, v)) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + assert(x.Kind() == constant.Float) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. 
The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 00000000..3cb7ae5b --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,606 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
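Both the intWriter above and the intReader defined below are thin wrappers around the standard library varint routines; a self-contained round trip, purely for illustration, looks like this:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	tmp := make([]byte, binary.MaxVarintLen64)

	// Writing, as in intWriter.int64/uint64.
	buf.Write(tmp[:binary.PutVarint(tmp, -42)])
	buf.Write(tmp[:binary.PutUvarint(tmp, 7)])

	// Reading back, as in intReader.int64/uint64 (bytes.Reader is an io.ByteReader).
	r := bytes.NewReader(buf.Bytes())
	i, _ := binary.ReadVarint(r)
	u, _ := binary.ReadUvarint(r)
	fmt.Println(i, u) // -42 7
}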
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + const currentVersion = 0 + version := -1 + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + version = int(r.uint64()) + switch version { + case currentVersion: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + var localpkg *types.Package + for _, pkg := range pkgList { + if pkg.Path() == path { + localpkg = pkg + } + } + + names := make([]string, 0, len(p.pkgIndex[localpkg])) + for name := range p.pkgIndex[localpkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(localpkg, name) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := 
append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + localpkg.SetImports(list) + + // package was imported completely and without errors + localpkg.MarkComplete() + + consumed, _ := r.Seek(0, io.SeekCurrent) + return int(consumed), localpkg, nil +} + +type iimporter struct { + ipath string + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := 
r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } + + if r.prevFile == "" && r.prevLine == 0 { + return token.NoPos + } + + return r.p.fake.pos(r.prevFile, int(r.prevLine)) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) 
+} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 00000000..463f2522 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 00000000..ab28b95c --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 00000000..fdc7da05 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,160 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "log" + "os" + "os/exec" + "strings" + "time" +) + +var debug = false + +// GetSizes returns the sizes used by the underlying driver with the given parameters. +func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. 
+ const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + // We did not find the driver, so use "go list". + tool = "off" + } + } + + if tool == "off" { + return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData) + } + + req, err := json.Marshal(struct { + Command string `json:"command"` + Env []string `json:"env"` + BuildFlags []string `json:"build_flags"` + }{ + Command: "sizes", + Env: env, + BuildFlags: buildFlags, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, tool) + cmd.Dir = dir + cmd.Env = env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + } + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return response.Sizes, nil +} + +func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} + args = append(args, buildFlags...) + args = append(args, "--", "unsafe") + stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...) + if err != nil { + return nil, err + } + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not determine GOARCH and Go compiler") + } + goarch := fields[0] + compiler := fields[1] + return types.SizesFor(compiler, goarch), nil +} + +// InvokeGo returns the stdout of a go command invocation. +func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) { + if debug { + defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) + } + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(append([]string{}, env...), "PWD="+dir) + cmd.Dir = dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - executable not found + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + if !usesExportData { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. 
We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) + } + + return stdout, nil +} + +func cmdDebugStr(envlist []string, args ...string) string { + env := make(map[string]string) + for _, kv := range envlist { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 00000000..3799f8ed --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,222 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. 
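A minimal usage sketch of the API described above, assuming LoadImports mode and the ID, GoFiles and Imports fields; error handling is kept deliberately short:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadImports}
	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, len(pkg.GoFiles), "Go files")
		for path := range pkg.Imports {
			fmt.Println("  imports", path)
		}
	}
}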
+ +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages, with each mode returning all the data of the +previous mode with some extra added. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. 
This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). +Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. +Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. 
+Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 00000000..22ff769e --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,79 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +// Driver +type driverRequest struct { + Command string `json:"command"` + Mode LoadMode `json:"mode"` + Env []string `json:"env"` + BuildFlags []string `json:"build_flags"` + Tests bool `json:"tests"` + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. 
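A hypothetical skeleton for such a driver tool, sketched here for illustration only: the request fields and JSON tags mirror the driverRequest struct above, while the exact JSON shape of the driverResponse it must print to stdout is not reproduced:

package main

import (
	"encoding/json"
	"log"
	"os"
)

// driverRequest mirrors the struct defined above; Mode arrives as the integer
// value of the LoadMode.
type driverRequest struct {
	Command    string            `json:"command"`
	Mode       int               `json:"mode"`
	Env        []string          `json:"env"`
	BuildFlags []string          `json:"build_flags"`
	Tests      bool              `json:"tests"`
	Overlay    map[string][]byte `json:"overlay"`
}

func main() {
	// The request is delivered on stdin; the package patterns arrive as arguments.
	var req driverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decoding driver request: %v", err)
	}
	log.Printf("mode=%d tests=%v patterns=%v", req.Mode, req.Tests, os.Args[1:])
	// A real driver would now resolve the patterns and print a JSON-encoded
	// driverResponse (Roots, Packages, Sizes) to stdout; that part is omitted here.
}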
+func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 00000000..3a0d4b01 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,832 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/semver" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +// init fills in r with a driverResponse. +func (r *responseDeduper) init(dr *driverResponse) { + r.dr = dr + r.seenRoots = map[string]bool{} + r.seenPackages = map[string]*Package{} + for _, pkg := range dr.Packages { + r.seenPackages[pkg.ID] = pkg + } + for _, root := range dr.Roots { + r.seenRoots[root] = true + } +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. 
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + var sizes types.Sizes + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + sizes, sizeserr = getSizes(cfg) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + var packagesNamed []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "iamashamedtousethedisabledqueryname": + packagesNamed = append(packagesNamed, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + response := &responseDeduper{} + var err error + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := golistDriver(cfg, restPatterns...) + if err != nil { + return nil, err + } + response.init(dr) + } else { + response.init(&driverResponse{}) + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + // types.SizesFor always returns nil or a *types.StdSizes + response.dr.Sizes, _ = sizes.(*types.StdSizes) + + var containsCandidates []string + + if len(containFiles) != 0 { + if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil { + return nil, err + } + } + + if len(packagesNamed) != 0 { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + return nil, err + } + } + + modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr) + if err != nil { + return nil, err + } + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg := response.seenPackages[id] + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + + return response.dr, nil +} + +func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := driver(cfg, pkgs...) 
+ if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := processGolistOverlay(cfg, response.dr) + if err != nil { + return err + } + addNeededOverlayPackages(cfg, driver, response, needPkgs) + return nil +} + +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := driver(cfg, pattern) + if err != nil { + return err + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + // calling `go env` isn't free; bail out if there's nothing to do. + if len(queries) == 0 { + return nil + } + // Determine which directories are relevant to scan. + roots, modRoot, err := roots(cfg) + if err != nil { + return err + } + + // Scan the selected directories. Simple matches, from GOPATH/GOROOT + // or the local module, can simply be "go list"ed. Matches from the + // module cache need special treatment. + var matchesMu sync.Mutex + var simpleMatches, modCacheMatches []string + add := func(root gopathwalk.Root, dir string) { + // Walk calls this concurrently; protect the result slices. + matchesMu.Lock() + defer matchesMu.Unlock() + + path := dir + if dir != root.Path { + path = dir[len(root.Path)+1:] + } + if pathMatchesQueries(path, queries) { + switch root.Type { + case gopathwalk.RootModuleCache: + modCacheMatches = append(modCacheMatches, path) + case gopathwalk.RootCurrentModule: + // We'd need to read go.mod to find the full + // import path. Relative's easier. + rel, err := filepath.Rel(cfg.Dir, dir) + if err != nil { + // This ought to be impossible, since + // we found dir in the current module. + panic(err) + } + simpleMatches = append(simpleMatches, "./"+rel) + case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: + simpleMatches = append(simpleMatches, path) + } + } + } + + startWalk := time.Now() + gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) + if debug { + log.Printf("%v for walk", time.Since(startWalk)) + } + + // Weird special case: the top-level package in a module will be in + // whatever directory the user checked the repository out into. It's + // more reasonable for that to not match the package name. 
So, if there + // are any Go files in the mod root, query it just to be safe. + if modRoot != "" { + rel, err := filepath.Rel(cfg.Dir, modRoot) + if err != nil { + panic(err) // See above. + } + + files, err := ioutil.ReadDir(modRoot) + for _, f := range files { + if strings.HasSuffix(f.Name(), ".go") { + simpleMatches = append(simpleMatches, rel) + break + } + } + } + + addResponse := func(r *driverResponse) { + for _, pkg := range r.Packages { + response.addPackage(pkg) + for _, name := range queries { + if pkg.Name == name { + response.addRoot(pkg.ID) + break + } + } + } + } + + if len(simpleMatches) != 0 { + resp, err := driver(cfg, simpleMatches...) + if err != nil { + return err + } + addResponse(resp) + } + + // Module cache matches are tricky. We want to avoid downloading new + // versions of things, so we need to use the ones present in the cache. + // go list doesn't accept version specifiers, so we have to write out a + // temporary module, and do the list in that module. + if len(modCacheMatches) != 0 { + // Collect all the matches, deduplicating by major version + // and preferring the newest. + type modInfo struct { + mod string + major string + } + mods := make(map[modInfo]string) + var imports []string + for _, modPath := range modCacheMatches { + matches := modCacheRegexp.FindStringSubmatch(modPath) + mod, ver := filepath.ToSlash(matches[1]), matches[2] + importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) + + major := semver.Major(ver) + if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { + mods[modInfo{mod, major}] = ver + } + + imports = append(imports, importPath) + } + + // Build the temporary module. + var gomod bytes.Buffer + gomod.WriteString("module modquery\nrequire (\n") + for mod, version := range mods { + gomod.WriteString("\t" + mod.mod + " " + version + "\n") + } + gomod.WriteString(")\n") + + tmpCfg := *cfg + + // We're only trying to look at stuff in the module cache, so + // disable the network. This should speed things up, and has + // prevented errors in at least one case, #28518. + tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...)) + + var err error + tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") + if err != nil { + return err + } + defer os.RemoveAll(tmpCfg.Dir) + + if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { + return fmt.Errorf("writing go.mod for module cache query: %v", err) + } + + // Run the query, using the import paths calculated from the matches above. + resp, err := driver(&tmpCfg, imports...) + if err != nil { + return fmt.Errorf("querying module cache matches: %v", err) + } + addResponse(resp) + } + + return nil +} + +func getSizes(cfg *Config) (types.Sizes, error) { + return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) +} + +// roots selects the appropriate paths to walk based on the passed-in configuration, +// particularly the environment and the presence of a go.mod in cfg.Dir's parents. 
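To make the module-cache branch above concrete (the module path and version are hypothetical): for a cache match such as github.com/pkg/errors@v0.8.1, the gomod buffer written out above would contain roughly

	module modquery
	require (
		github.com/pkg/errors v0.8.1
	)

and the driver is re-run for the computed import paths inside that throwaway module directory, with GOPROXY=off prepended to the environment so the query can only be satisfied by versions already present in the cache. Which directories are scanned for such matches in the first place is decided by roots, below.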
+func roots(cfg *Config) ([]gopathwalk.Root, string, error) { + stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") + if err != nil { + return nil, "", err + } + + fields := strings.Split(stdout.String(), "\n") + if len(fields) != 4 || len(fields[3]) != 0 { + return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) + } + goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] + var modDir string + if gomod != "" { + modDir = filepath.Dir(gomod) + } + + var roots []gopathwalk.Root + // Always add GOROOT. + roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT}) + // If modules are enabled, scan the module dir. + if modDir != "" { + roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule}) + } + // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. + for _, p := range gopath { + if modDir != "" { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) + } else { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH}) + } + } + + return roots, modDir, nil +} + +// These functions were copied from goimports. See further documentation there. + +// pathMatchesQueries is adapted from pkgIsCandidate. +// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? +func pathMatchesQueries(path string, queries []string) bool { + lastTwo := lastTwoComponents(path) + for _, query := range queries { + if strings.Contains(lastTwo, query) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, query) { + return true + } + } + } + return false +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. 
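Concretely, a single record emitted by go list -e -json looks roughly like the following (trimmed; the import path and file names are hypothetical), and the struct below mirrors these field names:

	{
		"Dir": "/home/user/proj/internal/db",
		"ImportPath": "example.com/proj/internal/db",
		"Name": "db",
		"GoFiles": ["db.go", "migrate.go"],
		"Imports": ["database/sql", "github.com/lib/pq"],
		"DepOnly": false
	}

Erroneous packages additionally carry an "Error" object with ImportStack, Pos, and Err fields, which is what populates jsonPackageError.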
+type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *jsonPackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. +func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := invokeGo(cfg, golistargs(cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + if old, found := seen[p.ImportPath]; found { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + // skip the duplicate + continue + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. 
+ if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + if p.Error != nil { + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + }) + } + + response.Packages = append(response.Packages, pkg) + } + + return &response, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "list", "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// invokeGo returns the stdout of a go command invocation. +func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. 
+ // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) + cmd.Dir = cfg.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if debug { + defer func(start time.Time) { + log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr) + }(time.Now()) + } + + if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. 
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout) + } + + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd, args ...string) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + var quotedArgs []string + for _, arg := range args { + quotedArgs = append(quotedArgs, strconv.Quote(arg)) + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 00000000..33a0a28f --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,138 @@ +package packages + +import ( + "go/parser" + "go/token" + "path/filepath" + "strconv" + "strings" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - test files +// - adding test and non-test files to test variants of packages +// - determining the correct package to add given a new import path +// - creating packages that don't exist +func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + for _, pkg := range response.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + } + +outer: + for path, contents := range cfg.Overlay { + base := filepath.Base(path) + if strings.HasSuffix(path, "_test.go") { + // Overlays don't support adding new test files yet. + // TODO(matloob): support adding new test files. + continue + } + dir := filepath.Dir(path) + for _, pkg := range response.Packages { + var dirContains, fileExists bool + for _, f := range pkg.GoFiles { + if sameFile(filepath.Dir(f), dir) { + dirContains = true + } + if filepath.Base(f) == base { + fileExists = true + } + } + // The overlay could have included an entirely new package. + isNewPackage := extractPackage(pkg, path, contents) + if dirContains || isNewPackage { + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles? + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(path, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue outer + } + for _, imp := range imports { + _, found := pkg.Imports[imp] + if !found { + needPkgsSet[imp] = true + // TODO(matloob): Handle cases when the following block isn't correct. + // These include imports of test variants, imports of vendored packages, etc. 
+ id, ok := havePkgs[imp] + if !ok { + id = imp + } + pkg.Imports[imp] = &Package{ID: id} + } + } + continue outer + } + } + } + + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// extractPackage attempts to extract a package defined in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func extractPackage(pkg *Package, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return false + } + // TODO(rstambler): This doesn't work for main packages. + if filepath.Base(pkg.PkgPath) != f.Name.Name { + return false + } + pkg.Name = f.Name.Name + pkg.Errors = nil + return true +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 00000000..eedd43bb --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1084 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// A LoadMode specifies the amount of detail to return when loading. +// Higher-numbered modes cause Load to return more information, +// but may be slower. Load may return more information than requested. +type LoadMode int + +const ( + // The following constants are used to specify which fields of the Package + // should be filled when loading is done. As a special case to provide + // backwards compatibility, a LoadMode of 0 is equivalent to LoadFiles. + // For all other LoadModes, the bits below specify which fields will be filled + // in the result packages. + // WARNING: This part of the go/packages API is EXPERIMENTAL. It might + // be changed or removed up until April 15 2019. After that date it will + // be frozen. + // TODO(matloob): Remove this comment on April 15. + + // ID and Errors (if present) will always be filled. + + // NeedName adds Name and PkgPath. 
+ NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports + // is not set NeedDeps has no effect. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes +) + +const ( + // LoadFiles finds the packages and computes their source file lists. + // Package fields: ID, Name, Errors, GoFiles, CompiledGoFiles, and OtherFiles. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // LoadImports adds import information for each package + // and its dependencies. + // Package fields added: Imports. + LoadImports = LoadFiles | NeedImports | NeedDeps + + // LoadTypes adds type information for package-level + // declarations in the packages matching the patterns. + // Package fields added: Types, TypesSizes, Fset, and IllTyped. + // This mode uses type information provided by the build system when + // possible, and may fill in the ExportFile field. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // LoadSyntax adds typed syntax trees for the packages matching the patterns. + // Package fields added: Syntax, and TypesInfo, for direct pattern matches only. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // LoadAllSyntax adds typed syntax trees for the packages matching the patterns + // and all dependencies. + // Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo, + // for all packages in the import graph. + LoadAllSyntax = LoadSyntax +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. 
+ // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) 
+ if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that looks for an external driver binary, and if +// it does not find it falls back to the built in go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + return driver(cfg, patterns...) +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that were presented to the compiler. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // Modes LoadTypes and above set this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete. + // Mode LoadAllSyntax sets this field for all packages, including dependencies. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // Mode LoadSyntax sets this field for packages matching the patterns. + // Mode LoadAllSyntax sets this field for all packages, including dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. 
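Pulling this file's pieces together, a minimal consumer looks roughly like the following. The pattern, mode, and overlay path are illustrative only; note that problems with individual packages surface as the Error values described here, recorded on Package.Errors, rather than through Load's error return:

	package main

	import (
		"fmt"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		cfg := &packages.Config{
			Mode: packages.LoadSyntax, // parsed and type-checked syntax for the matched packages
			// Overlay supplies in-memory file contents, including files that do not
			// exist on disk yet (within the limits described in doc.go).
			Overlay: map[string][]byte{
				"/home/user/hello/extra.go": []byte("package hello\n"),
			},
		}
		pkgs, err := packages.Load(cfg, "./...")
		if err != nil {
			panic(err) // Load itself failed, e.g. a malformed pattern or a driver error
		}
		// Per-package errors (ListError, ParseError, TypeError) live on
		// Package.Errors; PrintErrors walks the import graph and reports them.
		if packages.PrintErrors(pkgs) > 0 {
			return
		}
		for _, pkg := range pkgs {
			fmt.Printf("%s: %d files type-checked\n", pkg.ID, len(pkg.Syntax))
		}
	}

The ErrorKind type that follows distinguishes those three error sources.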
+type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // TODO(matloob): Add an implied mode here and use that instead of mode. + // Implied mode would contain all the fields we need the data for so we can + // get the actually requested fields. We'll zero them out before returning + // packages to the user. 
This will make it easier for us to get the conditions + // where we need certain modes right. +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + } + if ld.Config.Mode == 0 { + ld.Config.Mode = LoadFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + if ld.Mode&NeedTypes != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range list { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + lpkg := &loaderPackage{ + Package: pkg, + needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0, + needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 || + len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files + pkg.ExportFile == "" && pkg.PkgPath != "unsafe", + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. 
+ var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&(NeedImports|NeedDeps) == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right? + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + importPlaceholders := make(map[string]*Package) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. + if ld.Mode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.Mode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.Mode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.Mode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.Mode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.Mode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.Mode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.Mode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.Mode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.Mode&NeedDeps == 0 { + for j, pkg := range ld.pkgs[i].Imports { + ph, ok := importPlaceholders[pkg.ID] + if !ok { + ph = &Package{ID: pkg.ID} + importPlaceholders[pkg.ID] = ph + } + ld.pkgs[i].Imports[j] = ph + } + } + } + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. 
+// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode >= LoadTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. 
+ ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. 
+// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. 
+ // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0 +} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 00000000..b13cb081 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,55 @@ +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 00000000..38f596da --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + var obj types.Object + switch fun := astutil.Unparen(call.Fun).(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + return obj +} + +// StaticCallee returns the target (function or method) of a static +// function call, if any. It returns nil for calls to builtins. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 00000000..9c441dba --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +// +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 00000000..c7f75450 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,313 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to interface{} values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary interface{} values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. 
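// Editor's note, not part of the vendored patch: a minimal sketch of how the
// typeutil.Map defined below is typically used, keying entries by type
// identity (types.Identical) rather than by pointer equality:
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is a ready-to-use empty map

	intSlice := types.NewSlice(types.Typ[types.Int])
	m.Set(intSlice, "a []int")

	// A structurally identical but distinct Slice value still finds the
	// entry, which a plain map[types.Type]string keyed by pointer would not.
	other := types.NewSlice(types.Typ[types.Int])
	fmt.Println(m.At(other)) // a []int
	fmt.Println(m.Len())     // 1
}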
+// +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value interface{} +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +// +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +// +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +// +func (m *Map) At(key types.Type) interface{} { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. 
+// +func (m *Map) Iterate(f func(key types.Type, value interface{})) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ interface{}) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value interface{}) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +// +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +// +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{make(map[types.Type]uint32)} +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Interface: + var hash uint32 = 9103 + for i, n := 0, t.NumMethods(); i < n; i++ { + // See go/types.identicalMethods for rationale. + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) + } + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + // Not safe with a copying GC; objects may move. 
+ return uint32(reflect.ValueOf(t.Obj()).Pointer()) + + case *types.Tuple: + return h.hashTuple(t) + } + panic(t) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + var hash uint32 = 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 00000000..32084610 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +// +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := T.(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := T.Elem().(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 00000000..9849c24c --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import "go/types" + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. 
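// Editor's note, not part of the vendored patch: a small sketch of the
// MethodSetCache defined above and the IntuitiveMethodSet helper documented
// here, using a synthetic named type N (with one value method M) built via
// the go/types constructors:
package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p") // hypothetical package
	tname := types.NewTypeName(token.NoPos, pkg, "N", nil)
	named := types.NewNamed(tname, types.Typ[types.Int], nil)
	sig := types.NewSignature(
		types.NewVar(token.NoPos, pkg, "", named), nil, nil, false)
	named.AddMethod(types.NewFunc(token.NoPos, pkg, "M", sig))

	var cache typeutil.MethodSetCache // zero value is ready to use
	fmt.Println(cache.MethodSet(named).Len())                      // 1
	fmt.Println(cache.MethodSet(types.NewPointer(named)).Len())    // 1
	fmt.Println(len(typeutil.IntuitiveMethodSet(named, &cache)))   // 1
}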
+// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +// +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := T.(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 00000000..7219c8e9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of filepath.Walk for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// TraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// SkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. +var SkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. 
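// Editor's note, not part of the vendored patch: one possible use of the
// fastwalk.Walk function declared below, counting .go files under a
// hypothetical root. fastwalk is internal to x/tools, so this only compiles
// from within that module; the callback must be safe for concurrent use.
package main

import (
	"fmt"
	"os"
	"strings"
	"sync/atomic"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	var goFiles int64
	err := fastwalk.Walk("/tmp/src", func(path string, typ os.FileMode) error {
		if typ.IsRegular() && strings.HasSuffix(path, ".go") {
			atomic.AddInt64(&goFiles, 1) // walkFn runs from multiple goroutines
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println("go files:", goFiles)
}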
+func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. +func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == TraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. 
+ return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 00000000..ccffec5a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 00000000..ab7fbc0a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 00000000..a3b26a7b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 00000000..e880d358 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 00000000..a906b875 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 00000000..3369b1a0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. 
+ if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 00000000..04bb96a3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + Debug bool // Enable debug logging + ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. 
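// Editor's note, not part of the vendored patch: a sketch of how a
// goimports-style tool might drive this package, deriving roots via
// SrcDirsRoots (declared just below) and collecting candidate package
// directories with Walk. gopathwalk is internal to x/tools, so this only
// compiles from within that module; the add callback may run concurrently.
package main

import (
	"fmt"
	"go/build"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	roots := gopathwalk.SrcDirsRoots(&build.Default)
	gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
		fmt.Println(root.Type, dir) // one candidate Go package directory
	}, gopathwalk.Options{ModulesEnabled: false, Debug: false})
}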
+func SrcDirsRoots(ctx *build.Context) []Root { + var roots []Root + roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) + for _, p := range filepath.SplitList(ctx.GOPATH) { + roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) + } + return roots +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + for _, root := range roots { + walkDir(root, add, opts) + } +} + +func walkDir(root Root, add func(Root, string), opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Debug { + log.Printf("skipping nonexistant directory: %v", root.Path) + } + return + } + if opts.Debug { + log.Printf("scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Debug { + log.Printf("scanned %s", root.Path) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options. +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Debug { + log.Printf("Directory added to ignore list: %s", full) + } + } else if w.opts.Debug { + log.Printf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. +func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +func (w *walker) shouldSkipDir(fi os.FileInfo) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + return false +} + +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. 
+ return fastwalk.SkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.SkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.TraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go new file mode 100644 index 00000000..4af7118e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. 
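// Editor's note, not part of the vendored patch: the exported helpers
// documented here in action. The package is internal to x/tools, so this
// only compiles from within that module; expected outputs are shown in the
// trailing comments and follow from the definitions below.
package main

import (
	"fmt"

	"golang.org/x/tools/internal/semver"
)

func main() {
	fmt.Println(semver.IsValid("v1.2"))            // true (shorthand for v1.2.0)
	fmt.Println(semver.IsValid("1.2.0"))           // false (missing "v" prefix)
	fmt.Println(semver.Canonical("v1.2"))          // v1.2.0
	fmt.Println(semver.MajorMinor("v2.1.3"))       // v2.1
	fmt.Println(semver.Compare("v1.2.0", "v1.10")) // -1 (numeric, not lexical, minor compare)
	fmt.Println(semver.Max("v1.0.0+build", "v1"))  // v1.0.0 (build metadata discarded)
}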
+func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// according to semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +}
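// Editor's note, not part of the vendored patch: a short sketch of the
// prerelease precedence rules implemented by comparePrerelease above, shown
// by sorting a few versions with Compare (assuming the internal import path
// is available, i.e. building inside the x/tools module):
package main

import (
	"fmt"
	"sort"

	"golang.org/x/tools/internal/semver"
)

func main() {
	vs := []string{
		"v1.0.0", "v1.0.0-alpha.1", "v1.0.0-beta.11",
		"v1.0.0-alpha", "v1.0.0-beta.2", "v1.0.0-rc.1",
	}
	sort.Slice(vs, func(i, j int) bool { return semver.Compare(vs[i], vs[j]) < 0 })
	fmt.Println(vs)
	// [v1.0.0-alpha v1.0.0-alpha.1 v1.0.0-beta.2 v1.0.0-beta.11 v1.0.0-rc.1 v1.0.0]
}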