// Code generated by command: avogen -output zinstructions.go build. DO NOT EDIT.

package build

import (
	"github.com/mmcloughlin/avo/operand"
	"github.com/mmcloughlin/avo/x86"
)
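// Usage note (editorial, not emitted by avogen): the constructors defined in
// this file are meant to be called from an avo code-generation program, which
// builds up a function and writes the Plan 9 assembly out with Generate.
// A minimal sketch, assuming the usual dot-imports of the avo build, operand
// and reg packages:
//
//	TEXT("Add", NOSPLIT, "func(x, y uint64) uint64")
//	x := Load(Param("x"), GP64())
//	y := Load(Param("y"), GP64())
//	ADDQ(x, y)
//	Store(y, ReturnIndex(0))
//	RET()
//	Generate()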
// ADCB: Add with Carry.
//
// Forms:
//
// ADCB imm8 al
// ADCB imm8 r8
// ADCB r8 r8
// ADCB m8 r8
// ADCB imm8 m8
// ADCB r8 m8
// Construct and append a ADCB instruction to the active function.
func (c *Context) ADCB(imr, amr operand.Op) {
	if inst, err := x86.ADCB(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADCB: Add with Carry.
//
// Forms:
//
// ADCB imm8 al
// ADCB imm8 r8
// ADCB r8 r8
// ADCB m8 r8
// ADCB imm8 m8
// ADCB r8 m8
// Construct and append a ADCB instruction to the active function.
// Operates on the global context.
func ADCB(imr, amr operand.Op) { ctx.ADCB(imr, amr) }
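// Editorial note: the form that gets encoded is selected from the operand
// kinds passed in. Assuming the avo operand and reg packages, ADCB(Imm(1),
// reg.AL) would resolve to the "ADCB imm8 al" form, while passing an
// operand.Mem destination selects one of the m8 forms. Invalid operand
// combinations are not encoded; the error is collected via adderror and is
// typically reported when the program is generated.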
// ADCL: Add with Carry.
//
// Forms:
//
// ADCL imm32 eax
// ADCL imm8 r32
// ADCL imm32 r32
// ADCL r32 r32
// ADCL m32 r32
// ADCL imm8 m32
// ADCL imm32 m32
// ADCL r32 m32
// Construct and append a ADCL instruction to the active function.
func (c *Context) ADCL(imr, emr operand.Op) {
	if inst, err := x86.ADCL(imr, emr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADCL: Add with Carry.
//
// Forms:
//
// ADCL imm32 eax
// ADCL imm8 r32
// ADCL imm32 r32
// ADCL r32 r32
// ADCL m32 r32
// ADCL imm8 m32
// ADCL imm32 m32
// ADCL r32 m32
// Construct and append a ADCL instruction to the active function.
// Operates on the global context.
func ADCL(imr, emr operand.Op) { ctx.ADCL(imr, emr) }

// ADCQ: Add with Carry.
//
// Forms:
//
// ADCQ imm32 rax
// ADCQ imm8 r64
// ADCQ imm32 r64
// ADCQ r64 r64
// ADCQ m64 r64
// ADCQ imm8 m64
// ADCQ imm32 m64
// ADCQ r64 m64
// Construct and append a ADCQ instruction to the active function.
func (c *Context) ADCQ(imr, mr operand.Op) {
	if inst, err := x86.ADCQ(imr, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADCQ: Add with Carry.
//
// Forms:
//
// ADCQ imm32 rax
// ADCQ imm8 r64
// ADCQ imm32 r64
// ADCQ r64 r64
// ADCQ m64 r64
// ADCQ imm8 m64
// ADCQ imm32 m64
// ADCQ r64 m64
// Construct and append a ADCQ instruction to the active function.
// Operates on the global context.
func ADCQ(imr, mr operand.Op) { ctx.ADCQ(imr, mr) }

// ADCW: Add with Carry.
//
// Forms:
//
// ADCW imm16 ax
// ADCW imm8 r16
// ADCW imm16 r16
// ADCW r16 r16
// ADCW m16 r16
// ADCW imm8 m16
// ADCW imm16 m16
// ADCW r16 m16
// Construct and append a ADCW instruction to the active function.
func (c *Context) ADCW(imr, amr operand.Op) {
	if inst, err := x86.ADCW(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADCW: Add with Carry.
//
// Forms:
//
// ADCW imm16 ax
// ADCW imm8 r16
// ADCW imm16 r16
// ADCW r16 r16
// ADCW m16 r16
// ADCW imm8 m16
// ADCW imm16 m16
// ADCW r16 m16
// Construct and append a ADCW instruction to the active function.
// Operates on the global context.
func ADCW(imr, amr operand.Op) { ctx.ADCW(imr, amr) }
// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag.
//
// Forms:
//
// ADCXL r32 r32
// ADCXL m32 r32
// Construct and append a ADCXL instruction to the active function.
func (c *Context) ADCXL(mr, r operand.Op) {
	if inst, err := x86.ADCXL(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag.
//
// Forms:
//
// ADCXL r32 r32
// ADCXL m32 r32
// Construct and append a ADCXL instruction to the active function.
// Operates on the global context.
func ADCXL(mr, r operand.Op) { ctx.ADCXL(mr, r) }

// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag.
//
// Forms:
//
// ADCXQ r64 r64
// ADCXQ m64 r64
// Construct and append a ADCXQ instruction to the active function.
func (c *Context) ADCXQ(mr, r operand.Op) {
	if inst, err := x86.ADCXQ(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag.
//
// Forms:
//
// ADCXQ r64 r64
// ADCXQ m64 r64
// Construct and append a ADCXQ instruction to the active function.
// Operates on the global context.
func ADCXQ(mr, r operand.Op) { ctx.ADCXQ(mr, r) }
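// Editorial note: ADCX updates only the carry flag, and ADOX (ADOXL/ADOXQ,
// defined later in this file) updates only the overflow flag, so the two can
// be interleaved to keep two independent carry chains live, as in
// multi-precision multiplication (illustrative register names):
//
//	ADCXQ(lo, acc0) // carry chain propagated through CF
//	ADOXQ(hi, acc1) // separate carry chain propagated through OF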
// ADDB: Add.
//
// Forms:
//
// ADDB imm8 al
// ADDB imm8 r8
// ADDB r8 r8
// ADDB m8 r8
// ADDB imm8 m8
// ADDB r8 m8
// Construct and append a ADDB instruction to the active function.
func (c *Context) ADDB(imr, amr operand.Op) {
	if inst, err := x86.ADDB(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDB: Add.
//
// Forms:
//
// ADDB imm8 al
// ADDB imm8 r8
// ADDB r8 r8
// ADDB m8 r8
// ADDB imm8 m8
// ADDB r8 m8
// Construct and append a ADDB instruction to the active function.
// Operates on the global context.
func ADDB(imr, amr operand.Op) { ctx.ADDB(imr, amr) }

// ADDL: Add.
//
// Forms:
//
// ADDL imm32 eax
// ADDL imm8 r32
// ADDL imm32 r32
// ADDL r32 r32
// ADDL m32 r32
// ADDL imm8 m32
// ADDL imm32 m32
// ADDL r32 m32
// Construct and append a ADDL instruction to the active function.
func (c *Context) ADDL(imr, emr operand.Op) {
	if inst, err := x86.ADDL(imr, emr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDL: Add.
//
// Forms:
//
// ADDL imm32 eax
// ADDL imm8 r32
// ADDL imm32 r32
// ADDL r32 r32
// ADDL m32 r32
// ADDL imm8 m32
// ADDL imm32 m32
// ADDL r32 m32
// Construct and append a ADDL instruction to the active function.
// Operates on the global context.
func ADDL(imr, emr operand.Op) { ctx.ADDL(imr, emr) }

// ADDPD: Add Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// ADDPD xmm xmm
// ADDPD m128 xmm
// Construct and append a ADDPD instruction to the active function.
func (c *Context) ADDPD(mx, x operand.Op) {
	if inst, err := x86.ADDPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDPD: Add Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// ADDPD xmm xmm
// ADDPD m128 xmm
// Construct and append a ADDPD instruction to the active function.
// Operates on the global context.
func ADDPD(mx, x operand.Op) { ctx.ADDPD(mx, x) }

// ADDPS: Add Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// ADDPS xmm xmm
// ADDPS m128 xmm
// Construct and append a ADDPS instruction to the active function.
func (c *Context) ADDPS(mx, x operand.Op) {
	if inst, err := x86.ADDPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDPS: Add Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// ADDPS xmm xmm
// ADDPS m128 xmm
// Construct and append a ADDPS instruction to the active function.
// Operates on the global context.
func ADDPS(mx, x operand.Op) { ctx.ADDPS(mx, x) }

// ADDQ: Add.
//
// Forms:
//
// ADDQ imm32 rax
// ADDQ imm8 r64
// ADDQ imm32 r64
// ADDQ r64 r64
// ADDQ m64 r64
// ADDQ imm8 m64
// ADDQ imm32 m64
// ADDQ r64 m64
// Construct and append a ADDQ instruction to the active function.
func (c *Context) ADDQ(imr, mr operand.Op) {
	if inst, err := x86.ADDQ(imr, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDQ: Add.
//
// Forms:
//
// ADDQ imm32 rax
// ADDQ imm8 r64
// ADDQ imm32 r64
// ADDQ r64 r64
// ADDQ m64 r64
// ADDQ imm8 m64
// ADDQ imm32 m64
// ADDQ r64 m64
// Construct and append a ADDQ instruction to the active function.
// Operates on the global context.
func ADDQ(imr, mr operand.Op) { ctx.ADDQ(imr, mr) }
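// Editorial note: the B/W/L/Q suffixes follow Go's Plan 9 assembler
// convention for operand size: byte (8-bit), word (16-bit), long (32-bit) and
// quad (64-bit). Assuming the avo operand and reg packages, ADDL(Imm(1),
// reg.EAX) emits the 32-bit add and ADDQ(Imm(1), reg.RAX) the 64-bit form.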
// ADDSD: Add Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// ADDSD xmm xmm
// ADDSD m64 xmm
// Construct and append a ADDSD instruction to the active function.
func (c *Context) ADDSD(mx, x operand.Op) {
	if inst, err := x86.ADDSD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDSD: Add Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// ADDSD xmm xmm
// ADDSD m64 xmm
// Construct and append a ADDSD instruction to the active function.
// Operates on the global context.
func ADDSD(mx, x operand.Op) { ctx.ADDSD(mx, x) }

// ADDSS: Add Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// ADDSS xmm xmm
// ADDSS m32 xmm
// Construct and append a ADDSS instruction to the active function.
func (c *Context) ADDSS(mx, x operand.Op) {
	if inst, err := x86.ADDSS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDSS: Add Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// ADDSS xmm xmm
// ADDSS m32 xmm
// Construct and append a ADDSS instruction to the active function.
// Operates on the global context.
func ADDSS(mx, x operand.Op) { ctx.ADDSS(mx, x) }

// ADDSUBPD: Packed Double-FP Add/Subtract.
//
// Forms:
//
// ADDSUBPD xmm xmm
// ADDSUBPD m128 xmm
// Construct and append a ADDSUBPD instruction to the active function.
func (c *Context) ADDSUBPD(mx, x operand.Op) {
	if inst, err := x86.ADDSUBPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDSUBPD: Packed Double-FP Add/Subtract.
//
// Forms:
//
// ADDSUBPD xmm xmm
// ADDSUBPD m128 xmm
// Construct and append a ADDSUBPD instruction to the active function.
// Operates on the global context.
func ADDSUBPD(mx, x operand.Op) { ctx.ADDSUBPD(mx, x) }

// ADDSUBPS: Packed Single-FP Add/Subtract.
//
// Forms:
//
// ADDSUBPS xmm xmm
// ADDSUBPS m128 xmm
// Construct and append a ADDSUBPS instruction to the active function.
func (c *Context) ADDSUBPS(mx, x operand.Op) {
	if inst, err := x86.ADDSUBPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDSUBPS: Packed Single-FP Add/Subtract.
//
// Forms:
//
// ADDSUBPS xmm xmm
// ADDSUBPS m128 xmm
// Construct and append a ADDSUBPS instruction to the active function.
// Operates on the global context.
func ADDSUBPS(mx, x operand.Op) { ctx.ADDSUBPS(mx, x) }

// ADDW: Add.
//
// Forms:
//
// ADDW imm16 ax
// ADDW imm8 r16
// ADDW imm16 r16
// ADDW r16 r16
// ADDW m16 r16
// ADDW imm8 m16
// ADDW imm16 m16
// ADDW r16 m16
// Construct and append a ADDW instruction to the active function.
func (c *Context) ADDW(imr, amr operand.Op) {
	if inst, err := x86.ADDW(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADDW: Add.
//
// Forms:
//
// ADDW imm16 ax
// ADDW imm8 r16
// ADDW imm16 r16
// ADDW r16 r16
// ADDW m16 r16
// ADDW imm8 m16
// ADDW imm16 m16
// ADDW r16 m16
// Construct and append a ADDW instruction to the active function.
// Operates on the global context.
func ADDW(imr, amr operand.Op) { ctx.ADDW(imr, amr) }

// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag.
//
// Forms:
//
// ADOXL r32 r32
// ADOXL m32 r32
// Construct and append a ADOXL instruction to the active function.
func (c *Context) ADOXL(mr, r operand.Op) {
	if inst, err := x86.ADOXL(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag.
//
// Forms:
//
// ADOXL r32 r32
// ADOXL m32 r32
// Construct and append a ADOXL instruction to the active function.
// Operates on the global context.
func ADOXL(mr, r operand.Op) { ctx.ADOXL(mr, r) }

// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag.
//
// Forms:
//
// ADOXQ r64 r64
// ADOXQ m64 r64
// Construct and append a ADOXQ instruction to the active function.
func (c *Context) ADOXQ(mr, r operand.Op) {
	if inst, err := x86.ADOXQ(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag.
//
// Forms:
//
// ADOXQ r64 r64
// ADOXQ m64 r64
// Construct and append a ADOXQ instruction to the active function.
// Operates on the global context.
func ADOXQ(mr, r operand.Op) { ctx.ADOXQ(mr, r) }
// AESDEC: Perform One Round of an AES Decryption Flow.
//
// Forms:
//
// AESDEC xmm xmm
// AESDEC m128 xmm
// Construct and append a AESDEC instruction to the active function.
func (c *Context) AESDEC(mx, x operand.Op) {
	if inst, err := x86.AESDEC(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// AESDEC: Perform One Round of an AES Decryption Flow.
//
// Forms:
//
// AESDEC xmm xmm
// AESDEC m128 xmm
// Construct and append a AESDEC instruction to the active function.
// Operates on the global context.
func AESDEC(mx, x operand.Op) { ctx.AESDEC(mx, x) }

// AESDECLAST: Perform Last Round of an AES Decryption Flow.
//
// Forms:
//
// AESDECLAST xmm xmm
// AESDECLAST m128 xmm
// Construct and append a AESDECLAST instruction to the active function.
func (c *Context) AESDECLAST(mx, x operand.Op) {
	if inst, err := x86.AESDECLAST(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// AESDECLAST: Perform Last Round of an AES Decryption Flow.
//
// Forms:
//
// AESDECLAST xmm xmm
// AESDECLAST m128 xmm
// Construct and append a AESDECLAST instruction to the active function.
// Operates on the global context.
func AESDECLAST(mx, x operand.Op) { ctx.AESDECLAST(mx, x) }

// AESENC: Perform One Round of an AES Encryption Flow.
//
// Forms:
//
// AESENC xmm xmm
// AESENC m128 xmm
// Construct and append a AESENC instruction to the active function.
func (c *Context) AESENC(mx, x operand.Op) {
	if inst, err := x86.AESENC(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// AESENC: Perform One Round of an AES Encryption Flow.
//
// Forms:
//
// AESENC xmm xmm
// AESENC m128 xmm
// Construct and append a AESENC instruction to the active function.
// Operates on the global context.
func AESENC(mx, x operand.Op) { ctx.AESENC(mx, x) }

// AESENCLAST: Perform Last Round of an AES Encryption Flow.
//
// Forms:
//
// AESENCLAST xmm xmm
// AESENCLAST m128 xmm
// Construct and append a AESENCLAST instruction to the active function.
func (c *Context) AESENCLAST(mx, x operand.Op) {
	if inst, err := x86.AESENCLAST(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// AESENCLAST: Perform Last Round of an AES Encryption Flow.
//
// Forms:
//
// AESENCLAST xmm xmm
// AESENCLAST m128 xmm
// Construct and append a AESENCLAST instruction to the active function.
// Operates on the global context.
func AESENCLAST(mx, x operand.Op) { ctx.AESENCLAST(mx, x) }

// AESIMC: Perform the AES InvMixColumn Transformation.
//
// Forms:
//
// AESIMC xmm xmm
// AESIMC m128 xmm
// Construct and append a AESIMC instruction to the active function.
func (c *Context) AESIMC(mx, x operand.Op) {
	if inst, err := x86.AESIMC(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// AESIMC: Perform the AES InvMixColumn Transformation.
//
// Forms:
//
// AESIMC xmm xmm
// AESIMC m128 xmm
// Construct and append a AESIMC instruction to the active function.
// Operates on the global context.
func AESIMC(mx, x operand.Op) { ctx.AESIMC(mx, x) }

// AESKEYGENASSIST: AES Round Key Generation Assist.
//
// Forms:
//
// AESKEYGENASSIST imm8 xmm xmm
// AESKEYGENASSIST imm8 m128 xmm
// Construct and append a AESKEYGENASSIST instruction to the active function.
func (c *Context) AESKEYGENASSIST(i, mx, x operand.Op) {
	if inst, err := x86.AESKEYGENASSIST(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// AESKEYGENASSIST: AES Round Key Generation Assist.
//
// Forms:
//
// AESKEYGENASSIST imm8 xmm xmm
// AESKEYGENASSIST imm8 m128 xmm
// Construct and append a AESKEYGENASSIST instruction to the active function.
// Operates on the global context.
func AESKEYGENASSIST(i, mx, x operand.Op) { ctx.AESKEYGENASSIST(i, mx, x) }
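// Editorial note: AESENC performs one full encryption round (ShiftRows,
// SubBytes, MixColumns, then XOR with the round key given as the source
// operand) on the state register; AESENCLAST omits MixColumns for the final
// round, and AESDEC/AESDECLAST are the corresponding inverse-cipher rounds.
// A sketch of AES-128 encryption of one block in an avo program, assuming the
// round keys and state are already in XMM registers (illustrative names):
//
//	PXOR(rk[0], state)
//	for i := 1; i <= 9; i++ {
//		AESENC(rk[i], state)
//	}
//	AESENCLAST(rk[10], state)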
// ANDB: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDB imm8 al
|
|
// ANDB imm8 r8
|
|
// ANDB r8 r8
|
|
// ANDB m8 r8
|
|
// ANDB imm8 m8
|
|
// ANDB r8 m8
|
|
// Construct and append a ANDB instruction to the active function.
|
|
func (c *Context) ANDB(imr, amr operand.Op) {
|
|
if inst, err := x86.ANDB(imr, amr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDB: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDB imm8 al
|
|
// ANDB imm8 r8
|
|
// ANDB r8 r8
|
|
// ANDB m8 r8
|
|
// ANDB imm8 m8
|
|
// ANDB r8 m8
|
|
// Construct and append a ANDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDB(imr, amr operand.Op) { ctx.ANDB(imr, amr) }
|
|
|
|
// ANDL: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDL imm32 eax
|
|
// ANDL imm8 r32
|
|
// ANDL imm32 r32
|
|
// ANDL r32 r32
|
|
// ANDL m32 r32
|
|
// ANDL imm8 m32
|
|
// ANDL imm32 m32
|
|
// ANDL r32 m32
|
|
// Construct and append a ANDL instruction to the active function.
|
|
func (c *Context) ANDL(imr, emr operand.Op) {
|
|
if inst, err := x86.ANDL(imr, emr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDL: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDL imm32 eax
|
|
// ANDL imm8 r32
|
|
// ANDL imm32 r32
|
|
// ANDL r32 r32
|
|
// ANDL m32 r32
|
|
// ANDL imm8 m32
|
|
// ANDL imm32 m32
|
|
// ANDL r32 m32
|
|
// Construct and append a ANDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDL(imr, emr operand.Op) { ctx.ANDL(imr, emr) }
|
|
|
|
// ANDNL: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNL r32 r32 r32
|
|
// ANDNL m32 r32 r32
|
|
// Construct and append a ANDNL instruction to the active function.
|
|
func (c *Context) ANDNL(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.ANDNL(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDNL: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNL r32 r32 r32
|
|
// ANDNL m32 r32 r32
|
|
// Construct and append a ANDNL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNL(mr, r, r1 operand.Op) { ctx.ANDNL(mr, r, r1) }
|
|
|
|
// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPD xmm xmm
|
|
// ANDNPD m128 xmm
|
|
// Construct and append a ANDNPD instruction to the active function.
|
|
func (c *Context) ANDNPD(mx, x operand.Op) {
|
|
if inst, err := x86.ANDNPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPD xmm xmm
|
|
// ANDNPD m128 xmm
|
|
// Construct and append a ANDNPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNPD(mx, x operand.Op) { ctx.ANDNPD(mx, x) }
|
|
|
|
// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPS xmm xmm
|
|
// ANDNPS m128 xmm
|
|
// Construct and append a ANDNPS instruction to the active function.
|
|
func (c *Context) ANDNPS(mx, x operand.Op) {
|
|
if inst, err := x86.ANDNPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPS xmm xmm
|
|
// ANDNPS m128 xmm
|
|
// Construct and append a ANDNPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNPS(mx, x operand.Op) { ctx.ANDNPS(mx, x) }
|
|
|
|
// ANDNQ: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNQ r64 r64 r64
|
|
// ANDNQ m64 r64 r64
|
|
// Construct and append a ANDNQ instruction to the active function.
|
|
func (c *Context) ANDNQ(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.ANDNQ(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDNQ: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNQ r64 r64 r64
|
|
// ANDNQ m64 r64 r64
|
|
// Construct and append a ANDNQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNQ(mr, r, r1 operand.Op) { ctx.ANDNQ(mr, r, r1) }
|
|
|
|
// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPD xmm xmm
|
|
// ANDPD m128 xmm
|
|
// Construct and append a ANDPD instruction to the active function.
|
|
func (c *Context) ANDPD(mx, x operand.Op) {
|
|
if inst, err := x86.ANDPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPD xmm xmm
|
|
// ANDPD m128 xmm
|
|
// Construct and append a ANDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDPD(mx, x operand.Op) { ctx.ANDPD(mx, x) }
|
|
|
|
// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPS xmm xmm
|
|
// ANDPS m128 xmm
|
|
// Construct and append a ANDPS instruction to the active function.
|
|
func (c *Context) ANDPS(mx, x operand.Op) {
|
|
if inst, err := x86.ANDPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPS xmm xmm
|
|
// ANDPS m128 xmm
|
|
// Construct and append a ANDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDPS(mx, x operand.Op) { ctx.ANDPS(mx, x) }
|
|
|
|
// ANDQ: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDQ imm32 rax
|
|
// ANDQ imm8 r64
|
|
// ANDQ imm32 r64
|
|
// ANDQ r64 r64
|
|
// ANDQ m64 r64
|
|
// ANDQ imm8 m64
|
|
// ANDQ imm32 m64
|
|
// ANDQ r64 m64
|
|
// Construct and append a ANDQ instruction to the active function.
|
|
func (c *Context) ANDQ(imr, mr operand.Op) {
|
|
if inst, err := x86.ANDQ(imr, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDQ: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDQ imm32 rax
|
|
// ANDQ imm8 r64
|
|
// ANDQ imm32 r64
|
|
// ANDQ r64 r64
|
|
// ANDQ m64 r64
|
|
// ANDQ imm8 m64
|
|
// ANDQ imm32 m64
|
|
// ANDQ r64 m64
|
|
// Construct and append a ANDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDQ(imr, mr operand.Op) { ctx.ANDQ(imr, mr) }
|
|
|
|
// ANDW: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDW imm16 ax
|
|
// ANDW imm8 r16
|
|
// ANDW imm16 r16
|
|
// ANDW r16 r16
|
|
// ANDW m16 r16
|
|
// ANDW imm8 m16
|
|
// ANDW imm16 m16
|
|
// ANDW r16 m16
|
|
// Construct and append a ANDW instruction to the active function.
|
|
func (c *Context) ANDW(imr, amr operand.Op) {
|
|
if inst, err := x86.ANDW(imr, amr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ANDW: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDW imm16 ax
|
|
// ANDW imm8 r16
|
|
// ANDW imm16 r16
|
|
// ANDW r16 r16
|
|
// ANDW m16 r16
|
|
// ANDW imm8 m16
|
|
// ANDW imm16 m16
|
|
// ANDW r16 m16
|
|
// Construct and append a ANDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDW(imr, amr operand.Op) { ctx.ANDW(imr, amr) }
|
|
|
|
// BEXTRL: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRL r32 r32 r32
|
|
// BEXTRL r32 m32 r32
|
|
// Construct and append a BEXTRL instruction to the active function.
|
|
func (c *Context) BEXTRL(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.BEXTRL(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BEXTRL: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRL r32 r32 r32
|
|
// BEXTRL r32 m32 r32
|
|
// Construct and append a BEXTRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BEXTRL(r, mr, r1 operand.Op) { ctx.BEXTRL(r, mr, r1) }
|
|
|
|
// BEXTRQ: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRQ r64 r64 r64
|
|
// BEXTRQ r64 m64 r64
|
|
// Construct and append a BEXTRQ instruction to the active function.
|
|
func (c *Context) BEXTRQ(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.BEXTRQ(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BEXTRQ: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRQ r64 r64 r64
|
|
// BEXTRQ r64 m64 r64
|
|
// Construct and append a BEXTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BEXTRQ(r, mr, r1 operand.Op) { ctx.BEXTRQ(r, mr, r1) }
|
|
|
|
// BLENDPD: Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPD imm8 xmm xmm
|
|
// BLENDPD imm8 m128 xmm
|
|
// Construct and append a BLENDPD instruction to the active function.
|
|
func (c *Context) BLENDPD(i, mx, x operand.Op) {
|
|
if inst, err := x86.BLENDPD(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLENDPD: Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPD imm8 xmm xmm
|
|
// BLENDPD imm8 m128 xmm
|
|
// Construct and append a BLENDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDPD(i, mx, x operand.Op) { ctx.BLENDPD(i, mx, x) }
|
|
|
|
// BLENDPS: Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPS imm8 xmm xmm
|
|
// BLENDPS imm8 m128 xmm
|
|
// Construct and append a BLENDPS instruction to the active function.
|
|
func (c *Context) BLENDPS(i, mx, x operand.Op) {
|
|
if inst, err := x86.BLENDPS(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLENDPS: Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPS imm8 xmm xmm
|
|
// BLENDPS imm8 m128 xmm
|
|
// Construct and append a BLENDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDPS(i, mx, x operand.Op) { ctx.BLENDPS(i, mx, x) }
|
|
|
|
// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPD xmm0 xmm xmm
|
|
// BLENDVPD xmm0 m128 xmm
|
|
// Construct and append a BLENDVPD instruction to the active function.
|
|
func (c *Context) BLENDVPD(x, mx, x1 operand.Op) {
|
|
if inst, err := x86.BLENDVPD(x, mx, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPD xmm0 xmm xmm
|
|
// BLENDVPD xmm0 m128 xmm
|
|
// Construct and append a BLENDVPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDVPD(x, mx, x1 operand.Op) { ctx.BLENDVPD(x, mx, x1) }
|
|
|
|
// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPS xmm0 xmm xmm
|
|
// BLENDVPS xmm0 m128 xmm
|
|
// Construct and append a BLENDVPS instruction to the active function.
|
|
func (c *Context) BLENDVPS(x, mx, x1 operand.Op) {
|
|
if inst, err := x86.BLENDVPS(x, mx, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPS xmm0 xmm xmm
|
|
// BLENDVPS xmm0 m128 xmm
|
|
// Construct and append a BLENDVPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDVPS(x, mx, x1 operand.Op) { ctx.BLENDVPS(x, mx, x1) }
|
|
|
|
// BLSIL: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIL r32 r32
|
|
// BLSIL m32 r32
|
|
// Construct and append a BLSIL instruction to the active function.
|
|
func (c *Context) BLSIL(mr, r operand.Op) {
|
|
if inst, err := x86.BLSIL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLSIL: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIL r32 r32
|
|
// BLSIL m32 r32
|
|
// Construct and append a BLSIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSIL(mr, r operand.Op) { ctx.BLSIL(mr, r) }
|
|
|
|
// BLSIQ: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIQ r64 r64
|
|
// BLSIQ m64 r64
|
|
// Construct and append a BLSIQ instruction to the active function.
|
|
func (c *Context) BLSIQ(mr, r operand.Op) {
|
|
if inst, err := x86.BLSIQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLSIQ: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIQ r64 r64
|
|
// BLSIQ m64 r64
|
|
// Construct and append a BLSIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSIQ(mr, r operand.Op) { ctx.BLSIQ(mr, r) }
|
|
|
|
// BLSMSKL: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKL r32 r32
|
|
// BLSMSKL m32 r32
|
|
// Construct and append a BLSMSKL instruction to the active function.
|
|
func (c *Context) BLSMSKL(mr, r operand.Op) {
|
|
if inst, err := x86.BLSMSKL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLSMSKL: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKL r32 r32
|
|
// BLSMSKL m32 r32
|
|
// Construct and append a BLSMSKL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSMSKL(mr, r operand.Op) { ctx.BLSMSKL(mr, r) }
|
|
|
|
// BLSMSKQ: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKQ r64 r64
|
|
// BLSMSKQ m64 r64
|
|
// Construct and append a BLSMSKQ instruction to the active function.
|
|
func (c *Context) BLSMSKQ(mr, r operand.Op) {
|
|
if inst, err := x86.BLSMSKQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLSMSKQ: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKQ r64 r64
|
|
// BLSMSKQ m64 r64
|
|
// Construct and append a BLSMSKQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSMSKQ(mr, r operand.Op) { ctx.BLSMSKQ(mr, r) }
|
|
|
|
// BLSRL: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRL r32 r32
|
|
// BLSRL m32 r32
|
|
// Construct and append a BLSRL instruction to the active function.
|
|
func (c *Context) BLSRL(mr, r operand.Op) {
|
|
if inst, err := x86.BLSRL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLSRL: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRL r32 r32
|
|
// BLSRL m32 r32
|
|
// Construct and append a BLSRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSRL(mr, r operand.Op) { ctx.BLSRL(mr, r) }
|
|
|
|
// BLSRQ: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRQ r64 r64
|
|
// BLSRQ m64 r64
|
|
// Construct and append a BLSRQ instruction to the active function.
|
|
func (c *Context) BLSRQ(mr, r operand.Op) {
|
|
if inst, err := x86.BLSRQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BLSRQ: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRQ r64 r64
|
|
// BLSRQ m64 r64
|
|
// Construct and append a BLSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSRQ(mr, r operand.Op) { ctx.BLSRQ(mr, r) }
|
|
|
|
// BSFL: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFL r32 r32
|
|
// BSFL m32 r32
|
|
// Construct and append a BSFL instruction to the active function.
|
|
func (c *Context) BSFL(mr, r operand.Op) {
|
|
if inst, err := x86.BSFL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSFL: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFL r32 r32
|
|
// BSFL m32 r32
|
|
// Construct and append a BSFL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSFL(mr, r operand.Op) { ctx.BSFL(mr, r) }
|
|
|
|
// BSFQ: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFQ r64 r64
|
|
// BSFQ m64 r64
|
|
// Construct and append a BSFQ instruction to the active function.
|
|
func (c *Context) BSFQ(mr, r operand.Op) {
|
|
if inst, err := x86.BSFQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSFQ: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFQ r64 r64
|
|
// BSFQ m64 r64
|
|
// Construct and append a BSFQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSFQ(mr, r operand.Op) { ctx.BSFQ(mr, r) }
|
|
|
|
// BSFW: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFW r16 r16
|
|
// BSFW m16 r16
|
|
// Construct and append a BSFW instruction to the active function.
|
|
func (c *Context) BSFW(mr, r operand.Op) {
|
|
if inst, err := x86.BSFW(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSFW: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFW r16 r16
|
|
// BSFW m16 r16
|
|
// Construct and append a BSFW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSFW(mr, r operand.Op) { ctx.BSFW(mr, r) }
|
|
|
|
// BSRL: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRL r32 r32
|
|
// BSRL m32 r32
|
|
// Construct and append a BSRL instruction to the active function.
|
|
func (c *Context) BSRL(mr, r operand.Op) {
|
|
if inst, err := x86.BSRL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSRL: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRL r32 r32
|
|
// BSRL m32 r32
|
|
// Construct and append a BSRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSRL(mr, r operand.Op) { ctx.BSRL(mr, r) }
|
|
|
|
// BSRQ: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRQ r64 r64
|
|
// BSRQ m64 r64
|
|
// Construct and append a BSRQ instruction to the active function.
|
|
func (c *Context) BSRQ(mr, r operand.Op) {
|
|
if inst, err := x86.BSRQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSRQ: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRQ r64 r64
|
|
// BSRQ m64 r64
|
|
// Construct and append a BSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSRQ(mr, r operand.Op) { ctx.BSRQ(mr, r) }
|
|
|
|
// BSRW: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRW r16 r16
|
|
// BSRW m16 r16
|
|
// Construct and append a BSRW instruction to the active function.
|
|
func (c *Context) BSRW(mr, r operand.Op) {
|
|
if inst, err := x86.BSRW(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSRW: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRW r16 r16
|
|
// BSRW m16 r16
|
|
// Construct and append a BSRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSRW(mr, r operand.Op) { ctx.BSRW(mr, r) }
|
|
|
|
// BSWAPL: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPL r32
|
|
// Construct and append a BSWAPL instruction to the active function.
|
|
func (c *Context) BSWAPL(r operand.Op) {
|
|
if inst, err := x86.BSWAPL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSWAPL: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPL r32
|
|
// Construct and append a BSWAPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSWAPL(r operand.Op) { ctx.BSWAPL(r) }
|
|
|
|
// BSWAPQ: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPQ r64
|
|
// Construct and append a BSWAPQ instruction to the active function.
|
|
func (c *Context) BSWAPQ(r operand.Op) {
|
|
if inst, err := x86.BSWAPQ(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BSWAPQ: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPQ r64
|
|
// Construct and append a BSWAPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSWAPQ(r operand.Op) { ctx.BSWAPQ(r) }
|
|
|
|
// BTCL: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCL imm8 r32
|
|
// BTCL r32 r32
|
|
// BTCL imm8 m32
|
|
// BTCL r32 m32
|
|
// Construct and append a BTCL instruction to the active function.
|
|
func (c *Context) BTCL(ir, mr operand.Op) {
|
|
if inst, err := x86.BTCL(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTCL: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCL imm8 r32
|
|
// BTCL r32 r32
|
|
// BTCL imm8 m32
|
|
// BTCL r32 m32
|
|
// Construct and append a BTCL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTCL(ir, mr operand.Op) { ctx.BTCL(ir, mr) }
|
|
|
|
// BTCQ: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCQ imm8 r64
|
|
// BTCQ r64 r64
|
|
// BTCQ imm8 m64
|
|
// BTCQ r64 m64
|
|
// Construct and append a BTCQ instruction to the active function.
|
|
func (c *Context) BTCQ(ir, mr operand.Op) {
|
|
if inst, err := x86.BTCQ(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTCQ: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCQ imm8 r64
|
|
// BTCQ r64 r64
|
|
// BTCQ imm8 m64
|
|
// BTCQ r64 m64
|
|
// Construct and append a BTCQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTCQ(ir, mr operand.Op) { ctx.BTCQ(ir, mr) }
|
|
|
|
// BTCW: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCW imm8 r16
|
|
// BTCW r16 r16
|
|
// BTCW imm8 m16
|
|
// BTCW r16 m16
|
|
// Construct and append a BTCW instruction to the active function.
|
|
func (c *Context) BTCW(ir, mr operand.Op) {
|
|
if inst, err := x86.BTCW(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTCW: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCW imm8 r16
|
|
// BTCW r16 r16
|
|
// BTCW imm8 m16
|
|
// BTCW r16 m16
|
|
// Construct and append a BTCW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTCW(ir, mr operand.Op) { ctx.BTCW(ir, mr) }
|
|
|
|
// BTL: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTL imm8 r32
|
|
// BTL r32 r32
|
|
// BTL imm8 m32
|
|
// BTL r32 m32
|
|
// Construct and append a BTL instruction to the active function.
|
|
func (c *Context) BTL(ir, mr operand.Op) {
|
|
if inst, err := x86.BTL(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTL: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTL imm8 r32
|
|
// BTL r32 r32
|
|
// BTL imm8 m32
|
|
// BTL r32 m32
|
|
// Construct and append a BTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTL(ir, mr operand.Op) { ctx.BTL(ir, mr) }
|
|
|
|
// BTQ: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTQ imm8 r64
|
|
// BTQ r64 r64
|
|
// BTQ imm8 m64
|
|
// BTQ r64 m64
|
|
// Construct and append a BTQ instruction to the active function.
|
|
func (c *Context) BTQ(ir, mr operand.Op) {
|
|
if inst, err := x86.BTQ(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTQ: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTQ imm8 r64
|
|
// BTQ r64 r64
|
|
// BTQ imm8 m64
|
|
// BTQ r64 m64
|
|
// Construct and append a BTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTQ(ir, mr operand.Op) { ctx.BTQ(ir, mr) }
|
|
|
|
// BTRL: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRL imm8 r32
|
|
// BTRL r32 r32
|
|
// BTRL imm8 m32
|
|
// BTRL r32 m32
|
|
// Construct and append a BTRL instruction to the active function.
|
|
func (c *Context) BTRL(ir, mr operand.Op) {
|
|
if inst, err := x86.BTRL(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTRL: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRL imm8 r32
|
|
// BTRL r32 r32
|
|
// BTRL imm8 m32
|
|
// BTRL r32 m32
|
|
// Construct and append a BTRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTRL(ir, mr operand.Op) { ctx.BTRL(ir, mr) }
|
|
|
|
// BTRQ: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRQ imm8 r64
|
|
// BTRQ r64 r64
|
|
// BTRQ imm8 m64
|
|
// BTRQ r64 m64
|
|
// Construct and append a BTRQ instruction to the active function.
|
|
func (c *Context) BTRQ(ir, mr operand.Op) {
|
|
if inst, err := x86.BTRQ(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTRQ: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRQ imm8 r64
|
|
// BTRQ r64 r64
|
|
// BTRQ imm8 m64
|
|
// BTRQ r64 m64
|
|
// Construct and append a BTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTRQ(ir, mr operand.Op) { ctx.BTRQ(ir, mr) }
|
|
|
|
// BTRW: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRW imm8 r16
|
|
// BTRW r16 r16
|
|
// BTRW imm8 m16
|
|
// BTRW r16 m16
|
|
// Construct and append a BTRW instruction to the active function.
|
|
func (c *Context) BTRW(ir, mr operand.Op) {
|
|
if inst, err := x86.BTRW(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTRW: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRW imm8 r16
|
|
// BTRW r16 r16
|
|
// BTRW imm8 m16
|
|
// BTRW r16 m16
|
|
// Construct and append a BTRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTRW(ir, mr operand.Op) { ctx.BTRW(ir, mr) }
|
|
|
|
// BTSL: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSL imm8 r32
|
|
// BTSL r32 r32
|
|
// BTSL imm8 m32
|
|
// BTSL r32 m32
|
|
// Construct and append a BTSL instruction to the active function.
|
|
func (c *Context) BTSL(ir, mr operand.Op) {
|
|
if inst, err := x86.BTSL(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTSL: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSL imm8 r32
|
|
// BTSL r32 r32
|
|
// BTSL imm8 m32
|
|
// BTSL r32 m32
|
|
// Construct and append a BTSL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTSL(ir, mr operand.Op) { ctx.BTSL(ir, mr) }
|
|
|
|
// BTSQ: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSQ imm8 r64
|
|
// BTSQ r64 r64
|
|
// BTSQ imm8 m64
|
|
// BTSQ r64 m64
|
|
// Construct and append a BTSQ instruction to the active function.
|
|
func (c *Context) BTSQ(ir, mr operand.Op) {
|
|
if inst, err := x86.BTSQ(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTSQ: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSQ imm8 r64
|
|
// BTSQ r64 r64
|
|
// BTSQ imm8 m64
|
|
// BTSQ r64 m64
|
|
// Construct and append a BTSQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTSQ(ir, mr operand.Op) { ctx.BTSQ(ir, mr) }
|
|
|
|
// BTSW: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSW imm8 r16
|
|
// BTSW r16 r16
|
|
// BTSW imm8 m16
|
|
// BTSW r16 m16
|
|
// Construct and append a BTSW instruction to the active function.
|
|
func (c *Context) BTSW(ir, mr operand.Op) {
|
|
if inst, err := x86.BTSW(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTSW: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSW imm8 r16
|
|
// BTSW r16 r16
|
|
// BTSW imm8 m16
|
|
// BTSW r16 m16
|
|
// Construct and append a BTSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTSW(ir, mr operand.Op) { ctx.BTSW(ir, mr) }
|
|
|
|
// BTW: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTW imm8 r16
|
|
// BTW r16 r16
|
|
// BTW imm8 m16
|
|
// BTW r16 m16
|
|
// Construct and append a BTW instruction to the active function.
|
|
func (c *Context) BTW(ir, mr operand.Op) {
|
|
if inst, err := x86.BTW(ir, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BTW: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTW imm8 r16
|
|
// BTW r16 r16
|
|
// BTW imm8 m16
|
|
// BTW r16 m16
|
|
// Construct and append a BTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTW(ir, mr operand.Op) { ctx.BTW(ir, mr) }
|
|
|
|
// BZHIL: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIL r32 r32 r32
|
|
// BZHIL r32 m32 r32
|
|
// Construct and append a BZHIL instruction to the active function.
|
|
func (c *Context) BZHIL(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.BZHIL(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BZHIL: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIL r32 r32 r32
|
|
// BZHIL r32 m32 r32
|
|
// Construct and append a BZHIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BZHIL(r, mr, r1 operand.Op) { ctx.BZHIL(r, mr, r1) }
|
|
|
|
// BZHIQ: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIQ r64 r64 r64
|
|
// BZHIQ r64 m64 r64
|
|
// Construct and append a BZHIQ instruction to the active function.
|
|
func (c *Context) BZHIQ(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.BZHIQ(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// BZHIQ: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIQ r64 r64 r64
|
|
// BZHIQ r64 m64 r64
|
|
// Construct and append a BZHIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BZHIQ(r, mr, r1 operand.Op) { ctx.BZHIQ(r, mr, r1) }
|
|
|
|
// CALL: Call Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CALL rel32
|
|
// Construct and append a CALL instruction to the active function.
|
|
func (c *Context) CALL(r operand.Op) {
|
|
if inst, err := x86.CALL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CALL: Call Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CALL rel32
|
|
// Construct and append a CALL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CALL(r operand.Op) { ctx.CALL(r) }
|
|
|
|
// CBW: Convert Byte to Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CBW
|
|
// Construct and append a CBW instruction to the active function.
|
|
func (c *Context) CBW() {
|
|
if inst, err := x86.CBW(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CBW: Convert Byte to Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CBW
|
|
// Construct and append a CBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func CBW() { ctx.CBW() }
|
|
|
|
// CDQ: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQ
|
|
// Construct and append a CDQ instruction to the active function.
|
|
func (c *Context) CDQ() {
|
|
if inst, err := x86.CDQ(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CDQ: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQ
|
|
// Construct and append a CDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CDQ() { ctx.CDQ() }
|
|
|
|
// CDQE: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQE
|
|
// Construct and append a CDQE instruction to the active function.
|
|
func (c *Context) CDQE() {
|
|
if inst, err := x86.CDQE(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CDQE: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQE
|
|
// Construct and append a CDQE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CDQE() { ctx.CDQE() }
|
|
|
|
// CLC: Clear Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLC
|
|
// Construct and append a CLC instruction to the active function.
|
|
func (c *Context) CLC() {
|
|
if inst, err := x86.CLC(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CLC: Clear Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLC
|
|
// Construct and append a CLC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLC() { ctx.CLC() }
|
|
|
|
// CLD: Clear Direction Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLD
|
|
// Construct and append a CLD instruction to the active function.
|
|
func (c *Context) CLD() {
|
|
if inst, err := x86.CLD(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CLD: Clear Direction Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLD
|
|
// Construct and append a CLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLD() { ctx.CLD() }
|
|
|
|
// CLFLUSH: Flush Cache Line.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSH m8
|
|
// Construct and append a CLFLUSH instruction to the active function.
|
|
func (c *Context) CLFLUSH(m operand.Op) {
|
|
if inst, err := x86.CLFLUSH(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CLFLUSH: Flush Cache Line.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSH m8
|
|
// Construct and append a CLFLUSH instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLFLUSH(m operand.Op) { ctx.CLFLUSH(m) }
|
|
|
|
// CLFLUSHOPT: Flush Cache Line Optimized.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSHOPT m8
|
|
// Construct and append a CLFLUSHOPT instruction to the active function.
|
|
func (c *Context) CLFLUSHOPT(m operand.Op) {
|
|
if inst, err := x86.CLFLUSHOPT(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CLFLUSHOPT: Flush Cache Line Optimized.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSHOPT m8
|
|
// Construct and append a CLFLUSHOPT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLFLUSHOPT(m operand.Op) { ctx.CLFLUSHOPT(m) }
|
|
|
|
// CMC: Complement Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMC
|
|
// Construct and append a CMC instruction to the active function.
|
|
func (c *Context) CMC() {
|
|
if inst, err := x86.CMC(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMC: Complement Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMC
|
|
// Construct and append a CMC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMC() { ctx.CMC() }
|
|
|
|
// CMOVLCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCC r32 r32
|
|
// CMOVLCC m32 r32
|
|
// Construct and append a CMOVLCC instruction to the active function.
|
|
func (c *Context) CMOVLCC(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVLCC(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVLCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCC r32 r32
|
|
// CMOVLCC m32 r32
|
|
// Construct and append a CMOVLCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLCC(mr, r operand.Op) { ctx.CMOVLCC(mr, r) }
|
|
|
|
// CMOVLCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCS r32 r32
|
|
// CMOVLCS m32 r32
|
|
// Construct and append a CMOVLCS instruction to the active function.
|
|
func (c *Context) CMOVLCS(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVLCS(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVLCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCS r32 r32
|
|
// CMOVLCS m32 r32
|
|
// Construct and append a CMOVLCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLCS(mr, r operand.Op) { ctx.CMOVLCS(mr, r) }
|
|
|
|
// CMOVLEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLEQ r32 r32
|
|
// CMOVLEQ m32 r32
|
|
// Construct and append a CMOVLEQ instruction to the active function.
|
|
func (c *Context) CMOVLEQ(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVLEQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVLEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLEQ r32 r32
|
|
// CMOVLEQ m32 r32
|
|
// Construct and append a CMOVLEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLEQ(mr, r operand.Op) { ctx.CMOVLEQ(mr, r) }
|
|
|
|
// CMOVLGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGE r32 r32
|
|
// CMOVLGE m32 r32
|
|
// Construct and append a CMOVLGE instruction to the active function.
|
|
func (c *Context) CMOVLGE(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVLGE(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVLGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGE r32 r32
|
|
// CMOVLGE m32 r32
|
|
// Construct and append a CMOVLGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLGE(mr, r operand.Op) { ctx.CMOVLGE(mr, r) }
|
|
|
|
// CMOVLGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGT r32 r32
|
|
// CMOVLGT m32 r32
|
|
// Construct and append a CMOVLGT instruction to the active function.
|
|
func (c *Context) CMOVLGT(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVLGT(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVLGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGT r32 r32
|
|
// CMOVLGT m32 r32
|
|
// Construct and append a CMOVLGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLGT(mr, r operand.Op) { ctx.CMOVLGT(mr, r) }
|
|
|
|
// CMOVLHI: Move if above (CF == 0 and ZF == 0).
//
// Forms:
//
// CMOVLHI r32 r32
// CMOVLHI m32 r32
// Construct and append a CMOVLHI instruction to the active function.
func (c *Context) CMOVLHI(mr, r operand.Op) {
	if inst, err := x86.CMOVLHI(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLHI: Move if above (CF == 0 and ZF == 0).
//
// Forms:
//
// CMOVLHI r32 r32
// CMOVLHI m32 r32
// Construct and append a CMOVLHI instruction to the active function.
// Operates on the global context.
func CMOVLHI(mr, r operand.Op) { ctx.CMOVLHI(mr, r) }

// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
// CMOVLLE r32 r32
// CMOVLLE m32 r32
// Construct and append a CMOVLLE instruction to the active function.
func (c *Context) CMOVLLE(mr, r operand.Op) {
	if inst, err := x86.CMOVLLE(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
// CMOVLLE r32 r32
// CMOVLLE m32 r32
// Construct and append a CMOVLLE instruction to the active function.
// Operates on the global context.
func CMOVLLE(mr, r operand.Op) { ctx.CMOVLLE(mr, r) }

// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
// CMOVLLS r32 r32
// CMOVLLS m32 r32
// Construct and append a CMOVLLS instruction to the active function.
func (c *Context) CMOVLLS(mr, r operand.Op) {
	if inst, err := x86.CMOVLLS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
// CMOVLLS r32 r32
// CMOVLLS m32 r32
// Construct and append a CMOVLLS instruction to the active function.
// Operates on the global context.
func CMOVLLS(mr, r operand.Op) { ctx.CMOVLLS(mr, r) }

// CMOVLLT: Move if less (SF != OF).
//
// Forms:
//
// CMOVLLT r32 r32
// CMOVLLT m32 r32
// Construct and append a CMOVLLT instruction to the active function.
func (c *Context) CMOVLLT(mr, r operand.Op) {
	if inst, err := x86.CMOVLLT(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLLT: Move if less (SF != OF).
//
// Forms:
//
// CMOVLLT r32 r32
// CMOVLLT m32 r32
// Construct and append a CMOVLLT instruction to the active function.
// Operates on the global context.
func CMOVLLT(mr, r operand.Op) { ctx.CMOVLLT(mr, r) }

// CMOVLMI: Move if sign (SF == 1).
//
// Forms:
//
// CMOVLMI r32 r32
// CMOVLMI m32 r32
// Construct and append a CMOVLMI instruction to the active function.
func (c *Context) CMOVLMI(mr, r operand.Op) {
	if inst, err := x86.CMOVLMI(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLMI: Move if sign (SF == 1).
//
// Forms:
//
// CMOVLMI r32 r32
// CMOVLMI m32 r32
// Construct and append a CMOVLMI instruction to the active function.
// Operates on the global context.
func CMOVLMI(mr, r operand.Op) { ctx.CMOVLMI(mr, r) }

// CMOVLNE: Move if not equal (ZF == 0).
//
// Forms:
//
// CMOVLNE r32 r32
// CMOVLNE m32 r32
// Construct and append a CMOVLNE instruction to the active function.
func (c *Context) CMOVLNE(mr, r operand.Op) {
	if inst, err := x86.CMOVLNE(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLNE: Move if not equal (ZF == 0).
//
// Forms:
//
// CMOVLNE r32 r32
// CMOVLNE m32 r32
// Construct and append a CMOVLNE instruction to the active function.
// Operates on the global context.
func CMOVLNE(mr, r operand.Op) { ctx.CMOVLNE(mr, r) }

// CMOVLOC: Move if not overflow (OF == 0).
//
// Forms:
//
// CMOVLOC r32 r32
// CMOVLOC m32 r32
// Construct and append a CMOVLOC instruction to the active function.
func (c *Context) CMOVLOC(mr, r operand.Op) {
	if inst, err := x86.CMOVLOC(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLOC: Move if not overflow (OF == 0).
//
// Forms:
//
// CMOVLOC r32 r32
// CMOVLOC m32 r32
// Construct and append a CMOVLOC instruction to the active function.
// Operates on the global context.
func CMOVLOC(mr, r operand.Op) { ctx.CMOVLOC(mr, r) }

// CMOVLOS: Move if overflow (OF == 1).
//
// Forms:
//
// CMOVLOS r32 r32
// CMOVLOS m32 r32
// Construct and append a CMOVLOS instruction to the active function.
func (c *Context) CMOVLOS(mr, r operand.Op) {
	if inst, err := x86.CMOVLOS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLOS: Move if overflow (OF == 1).
//
// Forms:
//
// CMOVLOS r32 r32
// CMOVLOS m32 r32
// Construct and append a CMOVLOS instruction to the active function.
// Operates on the global context.
func CMOVLOS(mr, r operand.Op) { ctx.CMOVLOS(mr, r) }

// CMOVLPC: Move if not parity (PF == 0).
//
// Forms:
//
// CMOVLPC r32 r32
// CMOVLPC m32 r32
// Construct and append a CMOVLPC instruction to the active function.
func (c *Context) CMOVLPC(mr, r operand.Op) {
	if inst, err := x86.CMOVLPC(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLPC: Move if not parity (PF == 0).
//
// Forms:
//
// CMOVLPC r32 r32
// CMOVLPC m32 r32
// Construct and append a CMOVLPC instruction to the active function.
// Operates on the global context.
func CMOVLPC(mr, r operand.Op) { ctx.CMOVLPC(mr, r) }

// CMOVLPL: Move if not sign (SF == 0).
//
// Forms:
//
// CMOVLPL r32 r32
// CMOVLPL m32 r32
// Construct and append a CMOVLPL instruction to the active function.
func (c *Context) CMOVLPL(mr, r operand.Op) {
	if inst, err := x86.CMOVLPL(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLPL: Move if not sign (SF == 0).
//
// Forms:
//
// CMOVLPL r32 r32
// CMOVLPL m32 r32
// Construct and append a CMOVLPL instruction to the active function.
// Operates on the global context.
func CMOVLPL(mr, r operand.Op) { ctx.CMOVLPL(mr, r) }

// CMOVLPS: Move if parity (PF == 1).
//
// Forms:
//
// CMOVLPS r32 r32
// CMOVLPS m32 r32
// Construct and append a CMOVLPS instruction to the active function.
func (c *Context) CMOVLPS(mr, r operand.Op) {
	if inst, err := x86.CMOVLPS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVLPS: Move if parity (PF == 1).
//
// Forms:
//
// CMOVLPS r32 r32
// CMOVLPS m32 r32
// Construct and append a CMOVLPS instruction to the active function.
// Operates on the global context.
func CMOVLPS(mr, r operand.Op) { ctx.CMOVLPS(mr, r) }

// CMOVQCC: Move if above or equal (CF == 0).
//
// Forms:
//
// CMOVQCC r64 r64
// CMOVQCC m64 r64
// Construct and append a CMOVQCC instruction to the active function.
func (c *Context) CMOVQCC(mr, r operand.Op) {
	if inst, err := x86.CMOVQCC(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQCC: Move if above or equal (CF == 0).
//
// Forms:
//
// CMOVQCC r64 r64
// CMOVQCC m64 r64
// Construct and append a CMOVQCC instruction to the active function.
// Operates on the global context.
func CMOVQCC(mr, r operand.Op) { ctx.CMOVQCC(mr, r) }

// CMOVQCS: Move if below (CF == 1).
//
// Forms:
//
// CMOVQCS r64 r64
// CMOVQCS m64 r64
// Construct and append a CMOVQCS instruction to the active function.
func (c *Context) CMOVQCS(mr, r operand.Op) {
	if inst, err := x86.CMOVQCS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQCS: Move if below (CF == 1).
//
// Forms:
//
// CMOVQCS r64 r64
// CMOVQCS m64 r64
// Construct and append a CMOVQCS instruction to the active function.
// Operates on the global context.
func CMOVQCS(mr, r operand.Op) { ctx.CMOVQCS(mr, r) }

// CMOVQEQ: Move if equal (ZF == 1).
//
// Forms:
//
// CMOVQEQ r64 r64
// CMOVQEQ m64 r64
// Construct and append a CMOVQEQ instruction to the active function.
func (c *Context) CMOVQEQ(mr, r operand.Op) {
	if inst, err := x86.CMOVQEQ(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQEQ: Move if equal (ZF == 1).
//
// Forms:
//
// CMOVQEQ r64 r64
// CMOVQEQ m64 r64
// Construct and append a CMOVQEQ instruction to the active function.
// Operates on the global context.
func CMOVQEQ(mr, r operand.Op) { ctx.CMOVQEQ(mr, r) }

// CMOVQGE: Move if greater or equal (SF == OF).
//
// Forms:
//
// CMOVQGE r64 r64
// CMOVQGE m64 r64
// Construct and append a CMOVQGE instruction to the active function.
func (c *Context) CMOVQGE(mr, r operand.Op) {
	if inst, err := x86.CMOVQGE(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQGE: Move if greater or equal (SF == OF).
//
// Forms:
//
// CMOVQGE r64 r64
// CMOVQGE m64 r64
// Construct and append a CMOVQGE instruction to the active function.
// Operates on the global context.
func CMOVQGE(mr, r operand.Op) { ctx.CMOVQGE(mr, r) }

// CMOVQGT: Move if greater (ZF == 0 and SF == OF).
//
// Forms:
//
// CMOVQGT r64 r64
// CMOVQGT m64 r64
// Construct and append a CMOVQGT instruction to the active function.
func (c *Context) CMOVQGT(mr, r operand.Op) {
	if inst, err := x86.CMOVQGT(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQGT: Move if greater (ZF == 0 and SF == OF).
//
// Forms:
//
// CMOVQGT r64 r64
// CMOVQGT m64 r64
// Construct and append a CMOVQGT instruction to the active function.
// Operates on the global context.
func CMOVQGT(mr, r operand.Op) { ctx.CMOVQGT(mr, r) }

// CMOVQHI: Move if above (CF == 0 and ZF == 0).
//
// Forms:
//
// CMOVQHI r64 r64
// CMOVQHI m64 r64
// Construct and append a CMOVQHI instruction to the active function.
func (c *Context) CMOVQHI(mr, r operand.Op) {
	if inst, err := x86.CMOVQHI(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQHI: Move if above (CF == 0 and ZF == 0).
//
// Forms:
//
// CMOVQHI r64 r64
// CMOVQHI m64 r64
// Construct and append a CMOVQHI instruction to the active function.
// Operates on the global context.
func CMOVQHI(mr, r operand.Op) { ctx.CMOVQHI(mr, r) }

// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
// CMOVQLE r64 r64
// CMOVQLE m64 r64
// Construct and append a CMOVQLE instruction to the active function.
func (c *Context) CMOVQLE(mr, r operand.Op) {
	if inst, err := x86.CMOVQLE(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
// CMOVQLE r64 r64
// CMOVQLE m64 r64
// Construct and append a CMOVQLE instruction to the active function.
// Operates on the global context.
func CMOVQLE(mr, r operand.Op) { ctx.CMOVQLE(mr, r) }

// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
// CMOVQLS r64 r64
// CMOVQLS m64 r64
// Construct and append a CMOVQLS instruction to the active function.
func (c *Context) CMOVQLS(mr, r operand.Op) {
	if inst, err := x86.CMOVQLS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
// CMOVQLS r64 r64
// CMOVQLS m64 r64
// Construct and append a CMOVQLS instruction to the active function.
// Operates on the global context.
func CMOVQLS(mr, r operand.Op) { ctx.CMOVQLS(mr, r) }

// CMOVQLT: Move if less (SF != OF).
//
// Forms:
//
// CMOVQLT r64 r64
// CMOVQLT m64 r64
// Construct and append a CMOVQLT instruction to the active function.
func (c *Context) CMOVQLT(mr, r operand.Op) {
	if inst, err := x86.CMOVQLT(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQLT: Move if less (SF != OF).
//
// Forms:
//
// CMOVQLT r64 r64
// CMOVQLT m64 r64
// Construct and append a CMOVQLT instruction to the active function.
// Operates on the global context.
func CMOVQLT(mr, r operand.Op) { ctx.CMOVQLT(mr, r) }

// CMOVQMI: Move if sign (SF == 1).
//
// Forms:
//
// CMOVQMI r64 r64
// CMOVQMI m64 r64
// Construct and append a CMOVQMI instruction to the active function.
func (c *Context) CMOVQMI(mr, r operand.Op) {
	if inst, err := x86.CMOVQMI(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQMI: Move if sign (SF == 1).
//
// Forms:
//
// CMOVQMI r64 r64
// CMOVQMI m64 r64
// Construct and append a CMOVQMI instruction to the active function.
// Operates on the global context.
func CMOVQMI(mr, r operand.Op) { ctx.CMOVQMI(mr, r) }

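// Usage sketch (not part of the generated output; an illustrative assumption):
// a small avo generator program could use CMOVQMI for a branch-free int64 abs.
// The function name "abs64" and the program layout are hypothetical; TEXT,
// Load, Param, GP64, Store, ReturnIndex, RET and Generate are assumed from the
// surrounding build package. As with the usual branch-free abs, the minimum
// int64 value maps to itself.
//
//	//go:build ignore
//
//	package main
//
//	import . "github.com/mmcloughlin/avo/build"
//
//	func main() {
//		TEXT("abs64", NOSPLIT, "func(x int64) int64")
//		x := Load(Param("x"), GP64())
//		n := GP64()
//		MOVQ(x, n)
//		NEGQ(n)       // n = -x; SF reflects the sign of the result
//		CMOVQMI(x, n) // if -x is negative (x > 0), take x instead
//		Store(n, ReturnIndex(0))
//		RET()
//		Generate()
//	}
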
// CMOVQNE: Move if not equal (ZF == 0).
//
// Forms:
//
// CMOVQNE r64 r64
// CMOVQNE m64 r64
// Construct and append a CMOVQNE instruction to the active function.
func (c *Context) CMOVQNE(mr, r operand.Op) {
	if inst, err := x86.CMOVQNE(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQNE: Move if not equal (ZF == 0).
//
// Forms:
//
// CMOVQNE r64 r64
// CMOVQNE m64 r64
// Construct and append a CMOVQNE instruction to the active function.
// Operates on the global context.
func CMOVQNE(mr, r operand.Op) { ctx.CMOVQNE(mr, r) }

// CMOVQOC: Move if not overflow (OF == 0).
//
// Forms:
//
// CMOVQOC r64 r64
// CMOVQOC m64 r64
// Construct and append a CMOVQOC instruction to the active function.
func (c *Context) CMOVQOC(mr, r operand.Op) {
	if inst, err := x86.CMOVQOC(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQOC: Move if not overflow (OF == 0).
//
// Forms:
//
// CMOVQOC r64 r64
// CMOVQOC m64 r64
// Construct and append a CMOVQOC instruction to the active function.
// Operates on the global context.
func CMOVQOC(mr, r operand.Op) { ctx.CMOVQOC(mr, r) }

// CMOVQOS: Move if overflow (OF == 1).
//
// Forms:
//
// CMOVQOS r64 r64
// CMOVQOS m64 r64
// Construct and append a CMOVQOS instruction to the active function.
func (c *Context) CMOVQOS(mr, r operand.Op) {
	if inst, err := x86.CMOVQOS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQOS: Move if overflow (OF == 1).
//
// Forms:
//
// CMOVQOS r64 r64
// CMOVQOS m64 r64
// Construct and append a CMOVQOS instruction to the active function.
// Operates on the global context.
func CMOVQOS(mr, r operand.Op) { ctx.CMOVQOS(mr, r) }

// CMOVQPC: Move if not parity (PF == 0).
//
// Forms:
//
// CMOVQPC r64 r64
// CMOVQPC m64 r64
// Construct and append a CMOVQPC instruction to the active function.
func (c *Context) CMOVQPC(mr, r operand.Op) {
	if inst, err := x86.CMOVQPC(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQPC: Move if not parity (PF == 0).
//
// Forms:
//
// CMOVQPC r64 r64
// CMOVQPC m64 r64
// Construct and append a CMOVQPC instruction to the active function.
// Operates on the global context.
func CMOVQPC(mr, r operand.Op) { ctx.CMOVQPC(mr, r) }

// CMOVQPL: Move if not sign (SF == 0).
//
// Forms:
//
// CMOVQPL r64 r64
// CMOVQPL m64 r64
// Construct and append a CMOVQPL instruction to the active function.
func (c *Context) CMOVQPL(mr, r operand.Op) {
	if inst, err := x86.CMOVQPL(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQPL: Move if not sign (SF == 0).
//
// Forms:
//
// CMOVQPL r64 r64
// CMOVQPL m64 r64
// Construct and append a CMOVQPL instruction to the active function.
// Operates on the global context.
func CMOVQPL(mr, r operand.Op) { ctx.CMOVQPL(mr, r) }

// CMOVQPS: Move if parity (PF == 1).
//
// Forms:
//
// CMOVQPS r64 r64
// CMOVQPS m64 r64
// Construct and append a CMOVQPS instruction to the active function.
func (c *Context) CMOVQPS(mr, r operand.Op) {
	if inst, err := x86.CMOVQPS(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CMOVQPS: Move if parity (PF == 1).
//
// Forms:
//
// CMOVQPS r64 r64
// CMOVQPS m64 r64
// Construct and append a CMOVQPS instruction to the active function.
// Operates on the global context.
func CMOVQPS(mr, r operand.Op) { ctx.CMOVQPS(mr, r) }

// CMOVWCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWCC r16 r16
|
|
// CMOVWCC m16 r16
|
|
// Construct and append a CMOVWCC instruction to the active function.
|
|
func (c *Context) CMOVWCC(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWCC(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWCC r16 r16
|
|
// CMOVWCC m16 r16
|
|
// Construct and append a CMOVWCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWCC(mr, r operand.Op) { ctx.CMOVWCC(mr, r) }
|
|
|
|
// CMOVWCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWCS r16 r16
|
|
// CMOVWCS m16 r16
|
|
// Construct and append a CMOVWCS instruction to the active function.
|
|
func (c *Context) CMOVWCS(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWCS(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWCS r16 r16
|
|
// CMOVWCS m16 r16
|
|
// Construct and append a CMOVWCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWCS(mr, r operand.Op) { ctx.CMOVWCS(mr, r) }
|
|
|
|
// CMOVWEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWEQ r16 r16
|
|
// CMOVWEQ m16 r16
|
|
// Construct and append a CMOVWEQ instruction to the active function.
|
|
func (c *Context) CMOVWEQ(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWEQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWEQ r16 r16
|
|
// CMOVWEQ m16 r16
|
|
// Construct and append a CMOVWEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWEQ(mr, r operand.Op) { ctx.CMOVWEQ(mr, r) }
|
|
|
|
// CMOVWGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWGE r16 r16
|
|
// CMOVWGE m16 r16
|
|
// Construct and append a CMOVWGE instruction to the active function.
|
|
func (c *Context) CMOVWGE(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWGE(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWGE r16 r16
|
|
// CMOVWGE m16 r16
|
|
// Construct and append a CMOVWGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWGE(mr, r operand.Op) { ctx.CMOVWGE(mr, r) }
|
|
|
|
// CMOVWGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWGT r16 r16
|
|
// CMOVWGT m16 r16
|
|
// Construct and append a CMOVWGT instruction to the active function.
|
|
func (c *Context) CMOVWGT(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWGT(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWGT r16 r16
|
|
// CMOVWGT m16 r16
|
|
// Construct and append a CMOVWGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWGT(mr, r operand.Op) { ctx.CMOVWGT(mr, r) }
|
|
|
|
// CMOVWHI: Move if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWHI r16 r16
|
|
// CMOVWHI m16 r16
|
|
// Construct and append a CMOVWHI instruction to the active function.
|
|
func (c *Context) CMOVWHI(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWHI(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWHI: Move if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWHI r16 r16
|
|
// CMOVWHI m16 r16
|
|
// Construct and append a CMOVWHI instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWHI(mr, r operand.Op) { ctx.CMOVWHI(mr, r) }
|
|
|
|
// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWLE r16 r16
|
|
// CMOVWLE m16 r16
|
|
// Construct and append a CMOVWLE instruction to the active function.
|
|
func (c *Context) CMOVWLE(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWLE(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWLE r16 r16
|
|
// CMOVWLE m16 r16
|
|
// Construct and append a CMOVWLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWLE(mr, r operand.Op) { ctx.CMOVWLE(mr, r) }
|
|
|
|
// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWLS r16 r16
|
|
// CMOVWLS m16 r16
|
|
// Construct and append a CMOVWLS instruction to the active function.
|
|
func (c *Context) CMOVWLS(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWLS(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWLS r16 r16
|
|
// CMOVWLS m16 r16
|
|
// Construct and append a CMOVWLS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWLS(mr, r operand.Op) { ctx.CMOVWLS(mr, r) }
|
|
|
|
// CMOVWLT: Move if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWLT r16 r16
|
|
// CMOVWLT m16 r16
|
|
// Construct and append a CMOVWLT instruction to the active function.
|
|
func (c *Context) CMOVWLT(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWLT(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWLT: Move if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWLT r16 r16
|
|
// CMOVWLT m16 r16
|
|
// Construct and append a CMOVWLT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWLT(mr, r operand.Op) { ctx.CMOVWLT(mr, r) }
|
|
|
|
// CMOVWMI: Move if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWMI r16 r16
|
|
// CMOVWMI m16 r16
|
|
// Construct and append a CMOVWMI instruction to the active function.
|
|
func (c *Context) CMOVWMI(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWMI(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWMI: Move if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWMI r16 r16
|
|
// CMOVWMI m16 r16
|
|
// Construct and append a CMOVWMI instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWMI(mr, r operand.Op) { ctx.CMOVWMI(mr, r) }
|
|
|
|
// CMOVWNE: Move if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWNE r16 r16
|
|
// CMOVWNE m16 r16
|
|
// Construct and append a CMOVWNE instruction to the active function.
|
|
func (c *Context) CMOVWNE(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWNE(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWNE: Move if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWNE r16 r16
|
|
// CMOVWNE m16 r16
|
|
// Construct and append a CMOVWNE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWNE(mr, r operand.Op) { ctx.CMOVWNE(mr, r) }
|
|
|
|
// CMOVWOC: Move if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWOC r16 r16
|
|
// CMOVWOC m16 r16
|
|
// Construct and append a CMOVWOC instruction to the active function.
|
|
func (c *Context) CMOVWOC(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWOC(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWOC: Move if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWOC r16 r16
|
|
// CMOVWOC m16 r16
|
|
// Construct and append a CMOVWOC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWOC(mr, r operand.Op) { ctx.CMOVWOC(mr, r) }
|
|
|
|
// CMOVWOS: Move if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWOS r16 r16
|
|
// CMOVWOS m16 r16
|
|
// Construct and append a CMOVWOS instruction to the active function.
|
|
func (c *Context) CMOVWOS(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWOS(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWOS: Move if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWOS r16 r16
|
|
// CMOVWOS m16 r16
|
|
// Construct and append a CMOVWOS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWOS(mr, r operand.Op) { ctx.CMOVWOS(mr, r) }
|
|
|
|
// CMOVWPC: Move if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWPC r16 r16
|
|
// CMOVWPC m16 r16
|
|
// Construct and append a CMOVWPC instruction to the active function.
|
|
func (c *Context) CMOVWPC(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWPC(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWPC: Move if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWPC r16 r16
|
|
// CMOVWPC m16 r16
|
|
// Construct and append a CMOVWPC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWPC(mr, r operand.Op) { ctx.CMOVWPC(mr, r) }
|
|
|
|
// CMOVWPL: Move if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWPL r16 r16
|
|
// CMOVWPL m16 r16
|
|
// Construct and append a CMOVWPL instruction to the active function.
|
|
func (c *Context) CMOVWPL(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWPL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWPL: Move if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWPL r16 r16
|
|
// CMOVWPL m16 r16
|
|
// Construct and append a CMOVWPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWPL(mr, r operand.Op) { ctx.CMOVWPL(mr, r) }
|
|
|
|
// CMOVWPS: Move if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWPS r16 r16
|
|
// CMOVWPS m16 r16
|
|
// Construct and append a CMOVWPS instruction to the active function.
|
|
func (c *Context) CMOVWPS(mr, r operand.Op) {
|
|
if inst, err := x86.CMOVWPS(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMOVWPS: Move if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVWPS r16 r16
|
|
// CMOVWPS m16 r16
|
|
// Construct and append a CMOVWPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVWPS(mr, r operand.Op) { ctx.CMOVWPS(mr, r) }
|
|
|
|
// CMPB: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPB al imm8
|
|
// CMPB r8 imm8
|
|
// CMPB r8 r8
|
|
// CMPB r8 m8
|
|
// CMPB m8 imm8
|
|
// CMPB m8 r8
|
|
// Construct and append a CMPB instruction to the active function.
|
|
func (c *Context) CMPB(amr, imr operand.Op) {
|
|
if inst, err := x86.CMPB(amr, imr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPB: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPB al imm8
|
|
// CMPB r8 imm8
|
|
// CMPB r8 r8
|
|
// CMPB r8 m8
|
|
// CMPB m8 imm8
|
|
// CMPB m8 r8
|
|
// Construct and append a CMPB instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPB(amr, imr operand.Op) { ctx.CMPB(amr, imr) }
|
|
|
|
// CMPL: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPL eax imm32
|
|
// CMPL r32 imm8
|
|
// CMPL r32 imm32
|
|
// CMPL r32 r32
|
|
// CMPL r32 m32
|
|
// CMPL m32 imm8
|
|
// CMPL m32 imm32
|
|
// CMPL m32 r32
|
|
// Construct and append a CMPL instruction to the active function.
|
|
func (c *Context) CMPL(emr, imr operand.Op) {
|
|
if inst, err := x86.CMPL(emr, imr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPL: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPL eax imm32
|
|
// CMPL r32 imm8
|
|
// CMPL r32 imm32
|
|
// CMPL r32 r32
|
|
// CMPL r32 m32
|
|
// CMPL m32 imm8
|
|
// CMPL m32 imm32
|
|
// CMPL m32 r32
|
|
// Construct and append a CMPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPL(emr, imr operand.Op) { ctx.CMPL(emr, imr) }
|
|
|
|
// CMPPD: Compare Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPPD xmm xmm imm8
|
|
// CMPPD m128 xmm imm8
|
|
// Construct and append a CMPPD instruction to the active function.
|
|
func (c *Context) CMPPD(mx, x, i operand.Op) {
|
|
if inst, err := x86.CMPPD(mx, x, i); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPPD: Compare Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPPD xmm xmm imm8
|
|
// CMPPD m128 xmm imm8
|
|
// Construct and append a CMPPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPPD(mx, x, i operand.Op) { ctx.CMPPD(mx, x, i) }
|
|
|
|
// CMPPS: Compare Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPPS xmm xmm imm8
|
|
// CMPPS m128 xmm imm8
|
|
// Construct and append a CMPPS instruction to the active function.
|
|
func (c *Context) CMPPS(mx, x, i operand.Op) {
|
|
if inst, err := x86.CMPPS(mx, x, i); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPPS: Compare Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPPS xmm xmm imm8
|
|
// CMPPS m128 xmm imm8
|
|
// Construct and append a CMPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPPS(mx, x, i operand.Op) { ctx.CMPPS(mx, x, i) }
|
|
|
|
// CMPQ: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPQ rax imm32
|
|
// CMPQ r64 imm8
|
|
// CMPQ r64 imm32
|
|
// CMPQ r64 r64
|
|
// CMPQ r64 m64
|
|
// CMPQ m64 imm8
|
|
// CMPQ m64 imm32
|
|
// CMPQ m64 r64
|
|
// Construct and append a CMPQ instruction to the active function.
|
|
func (c *Context) CMPQ(mr, imr operand.Op) {
|
|
if inst, err := x86.CMPQ(mr, imr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPQ: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPQ rax imm32
|
|
// CMPQ r64 imm8
|
|
// CMPQ r64 imm32
|
|
// CMPQ r64 r64
|
|
// CMPQ r64 m64
|
|
// CMPQ m64 imm8
|
|
// CMPQ m64 imm32
|
|
// CMPQ m64 r64
|
|
// Construct and append a CMPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPQ(mr, imr operand.Op) { ctx.CMPQ(mr, imr) }
|
|
|
|
// CMPSD: Compare Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPSD xmm xmm imm8
|
|
// CMPSD m64 xmm imm8
|
|
// Construct and append a CMPSD instruction to the active function.
|
|
func (c *Context) CMPSD(mx, x, i operand.Op) {
|
|
if inst, err := x86.CMPSD(mx, x, i); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPSD: Compare Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPSD xmm xmm imm8
|
|
// CMPSD m64 xmm imm8
|
|
// Construct and append a CMPSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPSD(mx, x, i operand.Op) { ctx.CMPSD(mx, x, i) }
|
|
|
|
// CMPSS: Compare Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPSS xmm xmm imm8
|
|
// CMPSS m32 xmm imm8
|
|
// Construct and append a CMPSS instruction to the active function.
|
|
func (c *Context) CMPSS(mx, x, i operand.Op) {
|
|
if inst, err := x86.CMPSS(mx, x, i); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPSS: Compare Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPSS xmm xmm imm8
|
|
// CMPSS m32 xmm imm8
|
|
// Construct and append a CMPSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPSS(mx, x, i operand.Op) { ctx.CMPSS(mx, x, i) }
|
|
|
|
// CMPW: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPW ax imm16
|
|
// CMPW r16 imm8
|
|
// CMPW r16 imm16
|
|
// CMPW r16 r16
|
|
// CMPW r16 m16
|
|
// CMPW m16 imm8
|
|
// CMPW m16 imm16
|
|
// CMPW m16 r16
|
|
// Construct and append a CMPW instruction to the active function.
|
|
func (c *Context) CMPW(amr, imr operand.Op) {
|
|
if inst, err := x86.CMPW(amr, imr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPW: Compare Two Operands.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPW ax imm16
|
|
// CMPW r16 imm8
|
|
// CMPW r16 imm16
|
|
// CMPW r16 r16
|
|
// CMPW r16 m16
|
|
// CMPW m16 imm8
|
|
// CMPW m16 imm16
|
|
// CMPW m16 r16
|
|
// Construct and append a CMPW instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPW(amr, imr operand.Op) { ctx.CMPW(amr, imr) }
|
|
|
|
// CMPXCHG16B: Compare and Exchange 16 Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHG16B m128
|
|
// Construct and append a CMPXCHG16B instruction to the active function.
|
|
func (c *Context) CMPXCHG16B(m operand.Op) {
|
|
if inst, err := x86.CMPXCHG16B(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPXCHG16B: Compare and Exchange 16 Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHG16B m128
|
|
// Construct and append a CMPXCHG16B instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPXCHG16B(m operand.Op) { ctx.CMPXCHG16B(m) }
|
|
|
|
// CMPXCHG8B: Compare and Exchange 8 Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHG8B m64
|
|
// Construct and append a CMPXCHG8B instruction to the active function.
|
|
func (c *Context) CMPXCHG8B(m operand.Op) {
|
|
if inst, err := x86.CMPXCHG8B(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPXCHG8B: Compare and Exchange 8 Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHG8B m64
|
|
// Construct and append a CMPXCHG8B instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPXCHG8B(m operand.Op) { ctx.CMPXCHG8B(m) }
|
|
|
|
// CMPXCHGB: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGB r8 r8
|
|
// CMPXCHGB r8 m8
|
|
// Construct and append a CMPXCHGB instruction to the active function.
|
|
func (c *Context) CMPXCHGB(r, mr operand.Op) {
|
|
if inst, err := x86.CMPXCHGB(r, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPXCHGB: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGB r8 r8
|
|
// CMPXCHGB r8 m8
|
|
// Construct and append a CMPXCHGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPXCHGB(r, mr operand.Op) { ctx.CMPXCHGB(r, mr) }
|
|
|
|
// CMPXCHGL: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGL r32 r32
|
|
// CMPXCHGL r32 m32
|
|
// Construct and append a CMPXCHGL instruction to the active function.
|
|
func (c *Context) CMPXCHGL(r, mr operand.Op) {
|
|
if inst, err := x86.CMPXCHGL(r, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPXCHGL: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGL r32 r32
|
|
// CMPXCHGL r32 m32
|
|
// Construct and append a CMPXCHGL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPXCHGL(r, mr operand.Op) { ctx.CMPXCHGL(r, mr) }
|
|
|
|
// CMPXCHGQ: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGQ r64 r64
|
|
// CMPXCHGQ r64 m64
|
|
// Construct and append a CMPXCHGQ instruction to the active function.
|
|
func (c *Context) CMPXCHGQ(r, mr operand.Op) {
|
|
if inst, err := x86.CMPXCHGQ(r, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPXCHGQ: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGQ r64 r64
|
|
// CMPXCHGQ r64 m64
|
|
// Construct and append a CMPXCHGQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPXCHGQ(r, mr operand.Op) { ctx.CMPXCHGQ(r, mr) }
|
|
|
|
// CMPXCHGW: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGW r16 r16
|
|
// CMPXCHGW r16 m16
|
|
// Construct and append a CMPXCHGW instruction to the active function.
|
|
func (c *Context) CMPXCHGW(r, mr operand.Op) {
|
|
if inst, err := x86.CMPXCHGW(r, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CMPXCHGW: Compare and Exchange.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMPXCHGW r16 r16
|
|
// CMPXCHGW r16 m16
|
|
// Construct and append a CMPXCHGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMPXCHGW(r, mr operand.Op) { ctx.CMPXCHGW(r, mr) }
|
|
|
|
// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// COMISD xmm xmm
|
|
// COMISD m64 xmm
|
|
// Construct and append a COMISD instruction to the active function.
|
|
func (c *Context) COMISD(mx, x operand.Op) {
|
|
if inst, err := x86.COMISD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// COMISD xmm xmm
|
|
// COMISD m64 xmm
|
|
// Construct and append a COMISD instruction to the active function.
|
|
// Operates on the global context.
|
|
func COMISD(mx, x operand.Op) { ctx.COMISD(mx, x) }
|
|
|
|
// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// COMISS xmm xmm
|
|
// COMISS m32 xmm
|
|
// Construct and append a COMISS instruction to the active function.
|
|
func (c *Context) COMISS(mx, x operand.Op) {
|
|
if inst, err := x86.COMISS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// COMISS xmm xmm
|
|
// COMISS m32 xmm
|
|
// Construct and append a COMISS instruction to the active function.
|
|
// Operates on the global context.
|
|
func COMISS(mx, x operand.Op) { ctx.COMISS(mx, x) }
|
|
|
|
// CPUID: CPU Identification.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CPUID
|
|
// Construct and append a CPUID instruction to the active function.
|
|
func (c *Context) CPUID() {
|
|
if inst, err := x86.CPUID(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CPUID: CPU Identification.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CPUID
|
|
// Construct and append a CPUID instruction to the active function.
|
|
// Operates on the global context.
|
|
func CPUID() { ctx.CPUID() }
|
|
|
|
// CQO: Convert Quadword to Octaword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CQO
|
|
// Construct and append a CQO instruction to the active function.
|
|
func (c *Context) CQO() {
|
|
if inst, err := x86.CQO(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CQO: Convert Quadword to Octaword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CQO
|
|
// Construct and append a CQO instruction to the active function.
|
|
// Operates on the global context.
|
|
func CQO() { ctx.CQO() }
|
|
|
|
// CRC32B: Accumulate CRC32 Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CRC32B r8 r32
|
|
// CRC32B m8 r32
|
|
// CRC32B r8 r64
|
|
// CRC32B m8 r64
|
|
// Construct and append a CRC32B instruction to the active function.
|
|
func (c *Context) CRC32B(mr, r operand.Op) {
|
|
if inst, err := x86.CRC32B(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CRC32B: Accumulate CRC32 Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CRC32B r8 r32
|
|
// CRC32B m8 r32
|
|
// CRC32B r8 r64
|
|
// CRC32B m8 r64
|
|
// Construct and append a CRC32B instruction to the active function.
|
|
// Operates on the global context.
|
|
func CRC32B(mr, r operand.Op) { ctx.CRC32B(mr, r) }
|
|
|
|
// CRC32L: Accumulate CRC32 Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CRC32L r32 r32
|
|
// CRC32L m32 r32
|
|
// Construct and append a CRC32L instruction to the active function.
|
|
func (c *Context) CRC32L(mr, r operand.Op) {
|
|
if inst, err := x86.CRC32L(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CRC32L: Accumulate CRC32 Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CRC32L r32 r32
|
|
// CRC32L m32 r32
|
|
// Construct and append a CRC32L instruction to the active function.
|
|
// Operates on the global context.
|
|
func CRC32L(mr, r operand.Op) { ctx.CRC32L(mr, r) }
|
|
|
|
// CRC32Q: Accumulate CRC32 Value.
//
// Forms:
//
// CRC32Q r64 r64
// CRC32Q m64 r64
// Construct and append a CRC32Q instruction to the active function.
func (c *Context) CRC32Q(mr, r operand.Op) {
	if inst, err := x86.CRC32Q(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CRC32Q: Accumulate CRC32 Value.
//
// Forms:
//
// CRC32Q r64 r64
// CRC32Q m64 r64
// Construct and append a CRC32Q instruction to the active function.
// Operates on the global context.
func CRC32Q(mr, r operand.Op) { ctx.CRC32Q(mr, r) }

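// Usage sketch (not part of the generated output; an illustrative assumption):
// CRC32Q accumulates into a 64-bit destination register, so a generator for a
// hypothetical "crc32q" helper widens the 32-bit seed before the update. The
// program layout and helper name are assumptions; TEXT, Load, Param, GP64,
// Store, ReturnIndex, RET and Generate come from the surrounding build package.
//
//	//go:build ignore
//
//	package main
//
//	import . "github.com/mmcloughlin/avo/build"
//
//	func main() {
//		TEXT("crc32q", NOSPLIT, "func(crc uint32, x uint64) uint32")
//		crc := GP64()
//		Load(Param("crc"), crc.As32()) // 32-bit load zero-extends the seed
//		x := Load(Param("x"), GP64())
//		CRC32Q(x, crc) // form: CRC32Q r64 r64
//		Store(crc.As32(), ReturnIndex(0))
//		RET()
//		Generate()
//	}
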
// CRC32W: Accumulate CRC32 Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CRC32W r16 r32
|
|
// CRC32W m16 r32
|
|
// Construct and append a CRC32W instruction to the active function.
|
|
func (c *Context) CRC32W(mr, r operand.Op) {
|
|
if inst, err := x86.CRC32W(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CRC32W: Accumulate CRC32 Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CRC32W r16 r32
|
|
// CRC32W m16 r32
|
|
// Construct and append a CRC32W instruction to the active function.
|
|
// Operates on the global context.
|
|
func CRC32W(mr, r operand.Op) { ctx.CRC32W(mr, r) }
|
|
|
|
// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPD2PL xmm xmm
|
|
// CVTPD2PL m128 xmm
|
|
// Construct and append a CVTPD2PL instruction to the active function.
|
|
func (c *Context) CVTPD2PL(mx, x operand.Op) {
|
|
if inst, err := x86.CVTPD2PL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPD2PL xmm xmm
|
|
// CVTPD2PL m128 xmm
|
|
// Construct and append a CVTPD2PL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTPD2PL(mx, x operand.Op) { ctx.CVTPD2PL(mx, x) }
|
|
|
|
// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPD2PS xmm xmm
|
|
// CVTPD2PS m128 xmm
|
|
// Construct and append a CVTPD2PS instruction to the active function.
|
|
func (c *Context) CVTPD2PS(mx, x operand.Op) {
|
|
if inst, err := x86.CVTPD2PS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPD2PS xmm xmm
|
|
// CVTPD2PS m128 xmm
|
|
// Construct and append a CVTPD2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTPD2PS(mx, x operand.Op) { ctx.CVTPD2PS(mx, x) }
|
|
|
|
// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPL2PD xmm xmm
|
|
// CVTPL2PD m64 xmm
|
|
// Construct and append a CVTPL2PD instruction to the active function.
|
|
func (c *Context) CVTPL2PD(mx, x operand.Op) {
|
|
if inst, err := x86.CVTPL2PD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPL2PD xmm xmm
|
|
// CVTPL2PD m64 xmm
|
|
// Construct and append a CVTPL2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTPL2PD(mx, x operand.Op) { ctx.CVTPL2PD(mx, x) }
|
|
|
|
// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPL2PS xmm xmm
|
|
// CVTPL2PS m128 xmm
|
|
// Construct and append a CVTPL2PS instruction to the active function.
|
|
func (c *Context) CVTPL2PS(mx, x operand.Op) {
|
|
if inst, err := x86.CVTPL2PS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPL2PS xmm xmm
|
|
// CVTPL2PS m128 xmm
|
|
// Construct and append a CVTPL2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTPL2PS(mx, x operand.Op) { ctx.CVTPL2PS(mx, x) }
|
|
|
|
// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPS2PD xmm xmm
|
|
// CVTPS2PD m64 xmm
|
|
// Construct and append a CVTPS2PD instruction to the active function.
|
|
func (c *Context) CVTPS2PD(mx, x operand.Op) {
|
|
if inst, err := x86.CVTPS2PD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPS2PD xmm xmm
|
|
// CVTPS2PD m64 xmm
|
|
// Construct and append a CVTPS2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTPS2PD(mx, x operand.Op) { ctx.CVTPS2PD(mx, x) }
|
|
|
|
// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPS2PL xmm xmm
|
|
// CVTPS2PL m128 xmm
|
|
// Construct and append a CVTPS2PL instruction to the active function.
|
|
func (c *Context) CVTPS2PL(mx, x operand.Op) {
|
|
if inst, err := x86.CVTPS2PL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTPS2PL xmm xmm
|
|
// CVTPS2PL m128 xmm
|
|
// Construct and append a CVTPS2PL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTPS2PL(mx, x operand.Op) { ctx.CVTPS2PL(mx, x) }
|
|
|
|
// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSD2SL xmm r32
|
|
// CVTSD2SL m64 r32
|
|
// CVTSD2SL xmm r64
|
|
// CVTSD2SL m64 r64
|
|
// Construct and append a CVTSD2SL instruction to the active function.
|
|
func (c *Context) CVTSD2SL(mx, r operand.Op) {
|
|
if inst, err := x86.CVTSD2SL(mx, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSD2SL xmm r32
|
|
// CVTSD2SL m64 r32
|
|
// CVTSD2SL xmm r64
|
|
// CVTSD2SL m64 r64
|
|
// Construct and append a CVTSD2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSD2SL(mx, r operand.Op) { ctx.CVTSD2SL(mx, r) }
|
|
|
|
// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSD2SS xmm xmm
|
|
// CVTSD2SS m64 xmm
|
|
// Construct and append a CVTSD2SS instruction to the active function.
|
|
func (c *Context) CVTSD2SS(mx, x operand.Op) {
|
|
if inst, err := x86.CVTSD2SS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSD2SS xmm xmm
|
|
// CVTSD2SS m64 xmm
|
|
// Construct and append a CVTSD2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSD2SS(mx, x operand.Op) { ctx.CVTSD2SS(mx, x) }
|
|
|
|
// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SD r32 xmm
|
|
// CVTSL2SD m32 xmm
|
|
// Construct and append a CVTSL2SD instruction to the active function.
|
|
func (c *Context) CVTSL2SD(mr, x operand.Op) {
|
|
if inst, err := x86.CVTSL2SD(mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SD r32 xmm
|
|
// CVTSL2SD m32 xmm
|
|
// Construct and append a CVTSL2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSL2SD(mr, x operand.Op) { ctx.CVTSL2SD(mr, x) }
|
|
|
|
// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SS r32 xmm
|
|
// CVTSL2SS m32 xmm
|
|
// Construct and append a CVTSL2SS instruction to the active function.
|
|
func (c *Context) CVTSL2SS(mr, x operand.Op) {
|
|
if inst, err := x86.CVTSL2SS(mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SS r32 xmm
|
|
// CVTSL2SS m32 xmm
|
|
// Construct and append a CVTSL2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSL2SS(mr, x operand.Op) { ctx.CVTSL2SS(mr, x) }
|
|
|
|
// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SD r64 xmm
|
|
// CVTSQ2SD m64 xmm
|
|
// Construct and append a CVTSQ2SD instruction to the active function.
|
|
func (c *Context) CVTSQ2SD(mr, x operand.Op) {
|
|
if inst, err := x86.CVTSQ2SD(mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SD r64 xmm
|
|
// CVTSQ2SD m64 xmm
|
|
// Construct and append a CVTSQ2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSQ2SD(mr, x operand.Op) { ctx.CVTSQ2SD(mr, x) }
|
|
|
|
// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SS r64 xmm
|
|
// CVTSQ2SS m64 xmm
|
|
// Construct and append a CVTSQ2SS instruction to the active function.
|
|
func (c *Context) CVTSQ2SS(mr, x operand.Op) {
|
|
if inst, err := x86.CVTSQ2SS(mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SS r64 xmm
|
|
// CVTSQ2SS m64 xmm
|
|
// Construct and append a CVTSQ2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSQ2SS(mr, x operand.Op) { ctx.CVTSQ2SS(mr, x) }
|
|
|
|
// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SD xmm xmm
|
|
// CVTSS2SD m32 xmm
|
|
// Construct and append a CVTSS2SD instruction to the active function.
|
|
func (c *Context) CVTSS2SD(mx, x operand.Op) {
|
|
if inst, err := x86.CVTSS2SD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SD xmm xmm
|
|
// CVTSS2SD m32 xmm
|
|
// Construct and append a CVTSS2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSS2SD(mx, x operand.Op) { ctx.CVTSS2SD(mx, x) }
|
|
|
|
// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SL xmm r32
|
|
// CVTSS2SL m32 r32
|
|
// CVTSS2SL xmm r64
|
|
// CVTSS2SL m32 r64
|
|
// Construct and append a CVTSS2SL instruction to the active function.
|
|
func (c *Context) CVTSS2SL(mx, r operand.Op) {
|
|
if inst, err := x86.CVTSS2SL(mx, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SL xmm r32
|
|
// CVTSS2SL m32 r32
|
|
// CVTSS2SL xmm r64
|
|
// CVTSS2SL m32 r64
|
|
// Construct and append a CVTSS2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSS2SL(mx, r operand.Op) { ctx.CVTSS2SL(mx, r) }
|
|
|
|
// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPD2PL xmm xmm
|
|
// CVTTPD2PL m128 xmm
|
|
// Construct and append a CVTTPD2PL instruction to the active function.
|
|
func (c *Context) CVTTPD2PL(mx, x operand.Op) {
|
|
if inst, err := x86.CVTTPD2PL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPD2PL xmm xmm
|
|
// CVTTPD2PL m128 xmm
|
|
// Construct and append a CVTTPD2PL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTPD2PL(mx, x operand.Op) { ctx.CVTTPD2PL(mx, x) }
|
|
|
|
// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPS2PL xmm xmm
|
|
// CVTTPS2PL m128 xmm
|
|
// Construct and append a CVTTPS2PL instruction to the active function.
|
|
func (c *Context) CVTTPS2PL(mx, x operand.Op) {
|
|
if inst, err := x86.CVTTPS2PL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPS2PL xmm xmm
|
|
// CVTTPS2PL m128 xmm
|
|
// Construct and append a CVTTPS2PL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTPS2PL(mx, x operand.Op) { ctx.CVTTPS2PL(mx, x) }
|
|
|
|
// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSD2SL xmm r32
|
|
// CVTTSD2SL m64 r32
|
|
// Construct and append a CVTTSD2SL instruction to the active function.
|
|
func (c *Context) CVTTSD2SL(mx, r operand.Op) {
|
|
if inst, err := x86.CVTTSD2SL(mx, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSD2SL xmm r32
|
|
// CVTTSD2SL m64 r32
|
|
// Construct and append a CVTTSD2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTSD2SL(mx, r operand.Op) { ctx.CVTTSD2SL(mx, r) }
|
|
|
|
// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
//
// Forms:
//
// CVTTSD2SQ xmm r64
// CVTTSD2SQ m64 r64
// Construct and append a CVTTSD2SQ instruction to the active function.
func (c *Context) CVTTSD2SQ(mx, r operand.Op) {
	if inst, err := x86.CVTTSD2SQ(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
//
// Forms:
//
// CVTTSD2SQ xmm r64
// CVTTSD2SQ m64 r64
// Construct and append a CVTTSD2SQ instruction to the active function.
// Operates on the global context.
func CVTTSD2SQ(mx, r operand.Op) { ctx.CVTTSD2SQ(mx, r) }

// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSS2SL xmm r32
|
|
// CVTTSS2SL m32 r32
|
|
// CVTTSS2SL xmm r64
|
|
// CVTTSS2SL m32 r64
|
|
// Construct and append a CVTTSS2SL instruction to the active function.
|
|
func (c *Context) CVTTSS2SL(mx, r operand.Op) {
|
|
if inst, err := x86.CVTTSS2SL(mx, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSS2SL xmm r32
|
|
// CVTTSS2SL m32 r32
|
|
// CVTTSS2SL xmm r64
|
|
// CVTTSS2SL m32 r64
|
|
// Construct and append a CVTTSS2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTSS2SL(mx, r operand.Op) { ctx.CVTTSS2SL(mx, r) }
|
|
|
|
// CWD: Convert Word to Doubleword.
//
// Forms:
//
// 	CWD
// Construct and append a CWD instruction to the active function.
func (c *Context) CWD() {
	if inst, err := x86.CWD(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CWD: Convert Word to Doubleword.
//
// Forms:
//
// 	CWD
// Construct and append a CWD instruction to the active function.
// Operates on the global context.
func CWD() { ctx.CWD() }

// CWDE: Convert Word to Doubleword.
//
// Forms:
//
// 	CWDE
// Construct and append a CWDE instruction to the active function.
func (c *Context) CWDE() {
	if inst, err := x86.CWDE(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// CWDE: Convert Word to Doubleword.
//
// Forms:
//
// 	CWDE
// Construct and append a CWDE instruction to the active function.
// Operates on the global context.
func CWDE() { ctx.CWDE() }

// DECB: Decrement by 1.
//
// Forms:
//
// 	DECB r8
// 	DECB m8
// Construct and append a DECB instruction to the active function.
func (c *Context) DECB(mr operand.Op) {
	if inst, err := x86.DECB(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DECB: Decrement by 1.
//
// Forms:
//
// 	DECB r8
// 	DECB m8
// Construct and append a DECB instruction to the active function.
// Operates on the global context.
func DECB(mr operand.Op) { ctx.DECB(mr) }

// DECL: Decrement by 1.
//
// Forms:
//
// 	DECL r32
// 	DECL m32
// Construct and append a DECL instruction to the active function.
func (c *Context) DECL(mr operand.Op) {
	if inst, err := x86.DECL(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DECL: Decrement by 1.
//
// Forms:
//
// 	DECL r32
// 	DECL m32
// Construct and append a DECL instruction to the active function.
// Operates on the global context.
func DECL(mr operand.Op) { ctx.DECL(mr) }

// DECQ: Decrement by 1.
//
// Forms:
//
// 	DECQ r64
// 	DECQ m64
// Construct and append a DECQ instruction to the active function.
func (c *Context) DECQ(mr operand.Op) {
	if inst, err := x86.DECQ(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DECQ: Decrement by 1.
//
// Forms:
//
// 	DECQ r64
// 	DECQ m64
// Construct and append a DECQ instruction to the active function.
// Operates on the global context.
func DECQ(mr operand.Op) { ctx.DECQ(mr) }

// DECW: Decrement by 1.
//
// Forms:
//
// 	DECW r16
// 	DECW m16
// Construct and append a DECW instruction to the active function.
func (c *Context) DECW(mr operand.Op) {
	if inst, err := x86.DECW(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DECW: Decrement by 1.
//
// Forms:
//
// 	DECW r16
// 	DECW m16
// Construct and append a DECW instruction to the active function.
// Operates on the global context.
func DECW(mr operand.Op) { ctx.DECW(mr) }

// DIVB: Unsigned Divide.
//
// Forms:
//
// 	DIVB r8
// 	DIVB m8
// Construct and append a DIVB instruction to the active function.
func (c *Context) DIVB(mr operand.Op) {
	if inst, err := x86.DIVB(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVB: Unsigned Divide.
//
// Forms:
//
// 	DIVB r8
// 	DIVB m8
// Construct and append a DIVB instruction to the active function.
// Operates on the global context.
func DIVB(mr operand.Op) { ctx.DIVB(mr) }

// DIVL: Unsigned Divide.
//
// Forms:
//
// 	DIVL r32
// 	DIVL m32
// Construct and append a DIVL instruction to the active function.
func (c *Context) DIVL(mr operand.Op) {
	if inst, err := x86.DIVL(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVL: Unsigned Divide.
//
// Forms:
//
// 	DIVL r32
// 	DIVL m32
// Construct and append a DIVL instruction to the active function.
// Operates on the global context.
func DIVL(mr operand.Op) { ctx.DIVL(mr) }

// DIVPD: Divide Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVPD xmm xmm
// 	DIVPD m128 xmm
// Construct and append a DIVPD instruction to the active function.
func (c *Context) DIVPD(mx, x operand.Op) {
	if inst, err := x86.DIVPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVPD: Divide Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVPD xmm xmm
// 	DIVPD m128 xmm
// Construct and append a DIVPD instruction to the active function.
// Operates on the global context.
func DIVPD(mx, x operand.Op) { ctx.DIVPD(mx, x) }

// DIVPS: Divide Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVPS xmm xmm
// 	DIVPS m128 xmm
// Construct and append a DIVPS instruction to the active function.
func (c *Context) DIVPS(mx, x operand.Op) {
	if inst, err := x86.DIVPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVPS: Divide Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVPS xmm xmm
// 	DIVPS m128 xmm
// Construct and append a DIVPS instruction to the active function.
// Operates on the global context.
func DIVPS(mx, x operand.Op) { ctx.DIVPS(mx, x) }

// DIVQ: Unsigned Divide.
//
// Forms:
//
// 	DIVQ r64
// 	DIVQ m64
// Construct and append a DIVQ instruction to the active function.
func (c *Context) DIVQ(mr operand.Op) {
	if inst, err := x86.DIVQ(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVQ: Unsigned Divide.
//
// Forms:
//
// 	DIVQ r64
// 	DIVQ m64
// Construct and append a DIVQ instruction to the active function.
// Operates on the global context.
func DIVQ(mr operand.Op) { ctx.DIVQ(mr) }

// DIVSD: Divide Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVSD xmm xmm
// 	DIVSD m64 xmm
// Construct and append a DIVSD instruction to the active function.
func (c *Context) DIVSD(mx, x operand.Op) {
	if inst, err := x86.DIVSD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVSD: Divide Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVSD xmm xmm
// 	DIVSD m64 xmm
// Construct and append a DIVSD instruction to the active function.
// Operates on the global context.
func DIVSD(mx, x operand.Op) { ctx.DIVSD(mx, x) }

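// A minimal usage sketch for constructors such as DIVSD above, assuming the
// standard avo generator pattern: a standalone program dot-imports this build
// package and relies on the DSL helpers (TEXT, NOSPLIT, Load, Param, XMM,
// Store, ReturnIndex, RET, Generate) being available from it, as in avo's
// documented examples.
//
//	//go:build ignore
//
//	package main
//
//	import . "github.com/mmcloughlin/avo/build"
//
//	func main() {
//		TEXT("Div", NOSPLIT, "func(x, y float64) float64")
//		x := Load(Param("x"), XMM())
//		y := Load(Param("y"), XMM())
//		DIVSD(y, x) // x = x / y
//		Store(x, ReturnIndex(0))
//		RET()
//		Generate()
//	}
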
// DIVSS: Divide Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVSS xmm xmm
// 	DIVSS m32 xmm
// Construct and append a DIVSS instruction to the active function.
func (c *Context) DIVSS(mx, x operand.Op) {
	if inst, err := x86.DIVSS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVSS: Divide Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// 	DIVSS xmm xmm
// 	DIVSS m32 xmm
// Construct and append a DIVSS instruction to the active function.
// Operates on the global context.
func DIVSS(mx, x operand.Op) { ctx.DIVSS(mx, x) }

// DIVW: Unsigned Divide.
//
// Forms:
//
// 	DIVW r16
// 	DIVW m16
// Construct and append a DIVW instruction to the active function.
func (c *Context) DIVW(mr operand.Op) {
	if inst, err := x86.DIVW(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DIVW: Unsigned Divide.
//
// Forms:
//
// 	DIVW r16
// 	DIVW m16
// Construct and append a DIVW instruction to the active function.
// Operates on the global context.
func DIVW(mr operand.Op) { ctx.DIVW(mr) }

// DPPD: Dot Product of Packed Double Precision Floating-Point Values.
//
// Forms:
//
// 	DPPD imm8 xmm xmm
// 	DPPD imm8 m128 xmm
// Construct and append a DPPD instruction to the active function.
func (c *Context) DPPD(i, mx, x operand.Op) {
	if inst, err := x86.DPPD(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DPPD: Dot Product of Packed Double Precision Floating-Point Values.
//
// Forms:
//
// 	DPPD imm8 xmm xmm
// 	DPPD imm8 m128 xmm
// Construct and append a DPPD instruction to the active function.
// Operates on the global context.
func DPPD(i, mx, x operand.Op) { ctx.DPPD(i, mx, x) }

// DPPS: Dot Product of Packed Single Precision Floating-Point Values.
//
// Forms:
//
// 	DPPS imm8 xmm xmm
// 	DPPS imm8 m128 xmm
// Construct and append a DPPS instruction to the active function.
func (c *Context) DPPS(i, mx, x operand.Op) {
	if inst, err := x86.DPPS(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// DPPS: Dot Product of Packed Single Precision Floating-Point Values.
//
// Forms:
//
// 	DPPS imm8 xmm xmm
// 	DPPS imm8 m128 xmm
// Construct and append a DPPS instruction to the active function.
// Operates on the global context.
func DPPS(i, mx, x operand.Op) { ctx.DPPS(i, mx, x) }

// EXTRACTPS: Extract Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// EXTRACTPS imm2u xmm r32
|
|
// EXTRACTPS imm2u xmm m32
|
|
// Construct and append a EXTRACTPS instruction to the active function.
|
|
func (c *Context) EXTRACTPS(i, x, mr operand.Op) {
|
|
if inst, err := x86.EXTRACTPS(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// EXTRACTPS: Extract Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// EXTRACTPS imm2u xmm r32
|
|
// EXTRACTPS imm2u xmm m32
|
|
// Construct and append a EXTRACTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func EXTRACTPS(i, x, mr operand.Op) { ctx.EXTRACTPS(i, x, mr) }
|
|
|
|
// HADDPD: Packed Double-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPD xmm xmm
|
|
// HADDPD m128 xmm
|
|
// Construct and append a HADDPD instruction to the active function.
|
|
func (c *Context) HADDPD(mx, x operand.Op) {
|
|
if inst, err := x86.HADDPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// HADDPD: Packed Double-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPD xmm xmm
|
|
// HADDPD m128 xmm
|
|
// Construct and append a HADDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func HADDPD(mx, x operand.Op) { ctx.HADDPD(mx, x) }
|
|
|
|
// HADDPS: Packed Single-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPS xmm xmm
|
|
// HADDPS m128 xmm
|
|
// Construct and append a HADDPS instruction to the active function.
|
|
func (c *Context) HADDPS(mx, x operand.Op) {
|
|
if inst, err := x86.HADDPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// HADDPS: Packed Single-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPS xmm xmm
|
|
// HADDPS m128 xmm
|
|
// Construct and append a HADDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func HADDPS(mx, x operand.Op) { ctx.HADDPS(mx, x) }
|
|
|
|
// HSUBPD: Packed Double-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPD xmm xmm
|
|
// HSUBPD m128 xmm
|
|
// Construct and append a HSUBPD instruction to the active function.
|
|
func (c *Context) HSUBPD(mx, x operand.Op) {
|
|
if inst, err := x86.HSUBPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// HSUBPD: Packed Double-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPD xmm xmm
|
|
// HSUBPD m128 xmm
|
|
// Construct and append a HSUBPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func HSUBPD(mx, x operand.Op) { ctx.HSUBPD(mx, x) }
|
|
|
|
// HSUBPS: Packed Single-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPS xmm xmm
|
|
// HSUBPS m128 xmm
|
|
// Construct and append a HSUBPS instruction to the active function.
|
|
func (c *Context) HSUBPS(mx, x operand.Op) {
|
|
if inst, err := x86.HSUBPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// HSUBPS: Packed Single-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPS xmm xmm
|
|
// HSUBPS m128 xmm
|
|
// Construct and append a HSUBPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func HSUBPS(mx, x operand.Op) { ctx.HSUBPS(mx, x) }
|
|
|
|
// IDIVB: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVB r8
|
|
// IDIVB m8
|
|
// Construct and append a IDIVB instruction to the active function.
|
|
func (c *Context) IDIVB(mr operand.Op) {
|
|
if inst, err := x86.IDIVB(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IDIVB: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVB r8
|
|
// IDIVB m8
|
|
// Construct and append a IDIVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVB(mr operand.Op) { ctx.IDIVB(mr) }
|
|
|
|
// IDIVL: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVL r32
|
|
// IDIVL m32
|
|
// Construct and append a IDIVL instruction to the active function.
|
|
func (c *Context) IDIVL(mr operand.Op) {
|
|
if inst, err := x86.IDIVL(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IDIVL: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVL r32
|
|
// IDIVL m32
|
|
// Construct and append a IDIVL instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVL(mr operand.Op) { ctx.IDIVL(mr) }
|
|
|
|
// IDIVQ: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVQ r64
|
|
// IDIVQ m64
|
|
// Construct and append a IDIVQ instruction to the active function.
|
|
func (c *Context) IDIVQ(mr operand.Op) {
|
|
if inst, err := x86.IDIVQ(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IDIVQ: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVQ r64
|
|
// IDIVQ m64
|
|
// Construct and append a IDIVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVQ(mr operand.Op) { ctx.IDIVQ(mr) }
|
|
|
|
// IDIVW: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVW r16
|
|
// IDIVW m16
|
|
// Construct and append a IDIVW instruction to the active function.
|
|
func (c *Context) IDIVW(mr operand.Op) {
|
|
if inst, err := x86.IDIVW(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IDIVW: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVW r16
|
|
// IDIVW m16
|
|
// Construct and append a IDIVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVW(mr operand.Op) { ctx.IDIVW(mr) }
|
|
|
|
// IMUL3L: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3L imm8 r32 r32
|
|
// IMUL3L imm32 r32 r32
|
|
// IMUL3L imm8 m32 r32
|
|
// IMUL3L imm32 m32 r32
|
|
// Construct and append a IMUL3L instruction to the active function.
|
|
func (c *Context) IMUL3L(i, mr, r operand.Op) {
|
|
if inst, err := x86.IMUL3L(i, mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMUL3L: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3L imm8 r32 r32
|
|
// IMUL3L imm32 r32 r32
|
|
// IMUL3L imm8 m32 r32
|
|
// IMUL3L imm32 m32 r32
|
|
// Construct and append a IMUL3L instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMUL3L(i, mr, r operand.Op) { ctx.IMUL3L(i, mr, r) }
|
|
|
|
// IMUL3Q: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3Q imm8 r64 r64
|
|
// IMUL3Q imm32 r64 r64
|
|
// IMUL3Q imm8 m64 r64
|
|
// IMUL3Q imm32 m64 r64
|
|
// Construct and append a IMUL3Q instruction to the active function.
|
|
func (c *Context) IMUL3Q(i, mr, r operand.Op) {
|
|
if inst, err := x86.IMUL3Q(i, mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMUL3Q: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3Q imm8 r64 r64
|
|
// IMUL3Q imm32 r64 r64
|
|
// IMUL3Q imm8 m64 r64
|
|
// IMUL3Q imm32 m64 r64
|
|
// Construct and append a IMUL3Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMUL3Q(i, mr, r operand.Op) { ctx.IMUL3Q(i, mr, r) }
|
|
|
|
// IMUL3W: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3W imm8 r16 r16
|
|
// IMUL3W imm16 r16 r16
|
|
// IMUL3W imm8 m16 r16
|
|
// IMUL3W imm16 m16 r16
|
|
// Construct and append a IMUL3W instruction to the active function.
|
|
func (c *Context) IMUL3W(i, mr, r operand.Op) {
|
|
if inst, err := x86.IMUL3W(i, mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMUL3W: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3W imm8 r16 r16
|
|
// IMUL3W imm16 r16 r16
|
|
// IMUL3W imm8 m16 r16
|
|
// IMUL3W imm16 m16 r16
|
|
// Construct and append a IMUL3W instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMUL3W(i, mr, r operand.Op) { ctx.IMUL3W(i, mr, r) }
|
|
|
|
// IMULB: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULB r8
|
|
// IMULB m8
|
|
// Construct and append a IMULB instruction to the active function.
|
|
func (c *Context) IMULB(mr operand.Op) {
|
|
if inst, err := x86.IMULB(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMULB: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULB r8
|
|
// IMULB m8
|
|
// Construct and append a IMULB instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULB(mr operand.Op) { ctx.IMULB(mr) }
|
|
|
|
// IMULL: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULL r32
|
|
// IMULL m32
|
|
// IMULL r32 r32
|
|
// IMULL m32 r32
|
|
// Construct and append a IMULL instruction to the active function.
|
|
func (c *Context) IMULL(ops ...operand.Op) {
|
|
if inst, err := x86.IMULL(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMULL: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULL r32
|
|
// IMULL m32
|
|
// IMULL r32 r32
|
|
// IMULL m32 r32
|
|
// Construct and append a IMULL instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULL(ops ...operand.Op) { ctx.IMULL(ops...) }
|
|
|
|
// IMULQ: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULQ r64
|
|
// IMULQ m64
|
|
// IMULQ r64 r64
|
|
// IMULQ m64 r64
|
|
// Construct and append a IMULQ instruction to the active function.
|
|
func (c *Context) IMULQ(ops ...operand.Op) {
|
|
if inst, err := x86.IMULQ(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMULQ: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULQ r64
|
|
// IMULQ m64
|
|
// IMULQ r64 r64
|
|
// IMULQ m64 r64
|
|
// Construct and append a IMULQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULQ(ops ...operand.Op) { ctx.IMULQ(ops...) }
|
|
|
|
// IMULW: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULW r16
|
|
// IMULW m16
|
|
// IMULW r16 r16
|
|
// IMULW m16 r16
|
|
// Construct and append a IMULW instruction to the active function.
|
|
func (c *Context) IMULW(ops ...operand.Op) {
|
|
if inst, err := x86.IMULW(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// IMULW: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULW r16
|
|
// IMULW m16
|
|
// IMULW r16 r16
|
|
// IMULW m16 r16
|
|
// Construct and append a IMULW instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULW(ops ...operand.Op) { ctx.IMULW(ops...) }
|
|
|
|
// INCB: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCB r8
|
|
// INCB m8
|
|
// Construct and append a INCB instruction to the active function.
|
|
func (c *Context) INCB(mr operand.Op) {
|
|
if inst, err := x86.INCB(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// INCB: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCB r8
|
|
// INCB m8
|
|
// Construct and append a INCB instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCB(mr operand.Op) { ctx.INCB(mr) }
|
|
|
|
// INCL: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCL r32
|
|
// INCL m32
|
|
// Construct and append a INCL instruction to the active function.
|
|
func (c *Context) INCL(mr operand.Op) {
|
|
if inst, err := x86.INCL(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// INCL: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCL r32
|
|
// INCL m32
|
|
// Construct and append a INCL instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCL(mr operand.Op) { ctx.INCL(mr) }
|
|
|
|
// INCQ: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCQ r64
|
|
// INCQ m64
|
|
// Construct and append a INCQ instruction to the active function.
|
|
func (c *Context) INCQ(mr operand.Op) {
|
|
if inst, err := x86.INCQ(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// INCQ: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCQ r64
|
|
// INCQ m64
|
|
// Construct and append a INCQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCQ(mr operand.Op) { ctx.INCQ(mr) }
|
|
|
|
// INCW: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCW r16
|
|
// INCW m16
|
|
// Construct and append a INCW instruction to the active function.
|
|
func (c *Context) INCW(mr operand.Op) {
|
|
if inst, err := x86.INCW(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// INCW: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCW r16
|
|
// INCW m16
|
|
// Construct and append a INCW instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCW(mr operand.Op) { ctx.INCW(mr) }
|
|
|
|
// INSERTPS: Insert Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INSERTPS imm8 xmm xmm
|
|
// INSERTPS imm8 m32 xmm
|
|
// Construct and append a INSERTPS instruction to the active function.
|
|
func (c *Context) INSERTPS(i, mx, x operand.Op) {
|
|
if inst, err := x86.INSERTPS(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// INSERTPS: Insert Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INSERTPS imm8 xmm xmm
|
|
// INSERTPS imm8 m32 xmm
|
|
// Construct and append a INSERTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func INSERTPS(i, mx, x operand.Op) { ctx.INSERTPS(i, mx, x) }
|
|
|
|
// INT: Call to Interrupt Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INT 3
|
|
// INT imm8
|
|
// Construct and append a INT instruction to the active function.
|
|
func (c *Context) INT(i operand.Op) {
|
|
if inst, err := x86.INT(i); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// INT: Call to Interrupt Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INT 3
|
|
// INT imm8
|
|
// Construct and append a INT instruction to the active function.
|
|
// Operates on the global context.
|
|
func INT(i operand.Op) { ctx.INT(i) }
|
|
|
|
// JA: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JA rel8
|
|
// JA rel32
|
|
// Construct and append a JA instruction to the active function.
|
|
func (c *Context) JA(r operand.Op) {
|
|
if inst, err := x86.JA(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JA: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JA rel8
|
|
// JA rel32
|
|
// Construct and append a JA instruction to the active function.
|
|
// Operates on the global context.
|
|
func JA(r operand.Op) { ctx.JA(r) }
|
|
|
|
// JAE: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JAE rel8
|
|
// JAE rel32
|
|
// Construct and append a JAE instruction to the active function.
|
|
func (c *Context) JAE(r operand.Op) {
|
|
if inst, err := x86.JAE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JAE: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JAE rel8
|
|
// JAE rel32
|
|
// Construct and append a JAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JAE(r operand.Op) { ctx.JAE(r) }
|
|
|
|
// JB: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JB rel8
|
|
// JB rel32
|
|
// Construct and append a JB instruction to the active function.
|
|
func (c *Context) JB(r operand.Op) {
|
|
if inst, err := x86.JB(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JB: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JB rel8
|
|
// JB rel32
|
|
// Construct and append a JB instruction to the active function.
|
|
// Operates on the global context.
|
|
func JB(r operand.Op) { ctx.JB(r) }
|
|
|
|
// JBE: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JBE rel8
|
|
// JBE rel32
|
|
// Construct and append a JBE instruction to the active function.
|
|
func (c *Context) JBE(r operand.Op) {
|
|
if inst, err := x86.JBE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JBE: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JBE rel8
|
|
// JBE rel32
|
|
// Construct and append a JBE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JBE(r operand.Op) { ctx.JBE(r) }
|
|
|
|
// JC: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JC rel8
|
|
// JC rel32
|
|
// Construct and append a JC instruction to the active function.
|
|
func (c *Context) JC(r operand.Op) {
|
|
if inst, err := x86.JC(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JC: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JC rel8
|
|
// JC rel32
|
|
// Construct and append a JC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JC(r operand.Op) { ctx.JC(r) }
|
|
|
|
// JCC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCC rel8
|
|
// JCC rel32
|
|
// Construct and append a JCC instruction to the active function.
|
|
func (c *Context) JCC(r operand.Op) {
|
|
if inst, err := x86.JCC(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JCC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCC rel8
|
|
// JCC rel32
|
|
// Construct and append a JCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCC(r operand.Op) { ctx.JCC(r) }
|
|
|
|
// JCS: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCS rel8
|
|
// JCS rel32
|
|
// Construct and append a JCS instruction to the active function.
|
|
func (c *Context) JCS(r operand.Op) {
|
|
if inst, err := x86.JCS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JCS: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCS rel8
|
|
// JCS rel32
|
|
// Construct and append a JCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCS(r operand.Op) { ctx.JCS(r) }
|
|
|
|
// JCXZL: Jump if ECX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZL rel8
|
|
// Construct and append a JCXZL instruction to the active function.
|
|
func (c *Context) JCXZL(r operand.Op) {
|
|
if inst, err := x86.JCXZL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JCXZL: Jump if ECX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZL rel8
|
|
// Construct and append a JCXZL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCXZL(r operand.Op) { ctx.JCXZL(r) }
|
|
|
|
// JCXZQ: Jump if RCX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZQ rel8
|
|
// Construct and append a JCXZQ instruction to the active function.
|
|
func (c *Context) JCXZQ(r operand.Op) {
|
|
if inst, err := x86.JCXZQ(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JCXZQ: Jump if RCX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZQ rel8
|
|
// Construct and append a JCXZQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCXZQ(r operand.Op) { ctx.JCXZQ(r) }
|
|
|
|
// JE: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JE rel8
|
|
// JE rel32
|
|
// Construct and append a JE instruction to the active function.
|
|
func (c *Context) JE(r operand.Op) {
|
|
if inst, err := x86.JE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JE: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JE rel8
|
|
// JE rel32
|
|
// Construct and append a JE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JE(r operand.Op) { ctx.JE(r) }
|
|
|
|
// JEQ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JEQ rel8
|
|
// JEQ rel32
|
|
// Construct and append a JEQ instruction to the active function.
|
|
func (c *Context) JEQ(r operand.Op) {
|
|
if inst, err := x86.JEQ(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JEQ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JEQ rel8
|
|
// JEQ rel32
|
|
// Construct and append a JEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JEQ(r operand.Op) { ctx.JEQ(r) }
|
|
|
|
// JG: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JG rel8
|
|
// JG rel32
|
|
// Construct and append a JG instruction to the active function.
|
|
func (c *Context) JG(r operand.Op) {
|
|
if inst, err := x86.JG(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JG: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JG rel8
|
|
// JG rel32
|
|
// Construct and append a JG instruction to the active function.
|
|
// Operates on the global context.
|
|
func JG(r operand.Op) { ctx.JG(r) }
|
|
|
|
// JGE: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGE rel8
|
|
// JGE rel32
|
|
// Construct and append a JGE instruction to the active function.
|
|
func (c *Context) JGE(r operand.Op) {
|
|
if inst, err := x86.JGE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JGE: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGE rel8
|
|
// JGE rel32
|
|
// Construct and append a JGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JGE(r operand.Op) { ctx.JGE(r) }
|
|
|
|
// JGT: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGT rel8
|
|
// JGT rel32
|
|
// Construct and append a JGT instruction to the active function.
|
|
func (c *Context) JGT(r operand.Op) {
|
|
if inst, err := x86.JGT(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JGT: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGT rel8
|
|
// JGT rel32
|
|
// Construct and append a JGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func JGT(r operand.Op) { ctx.JGT(r) }
|
|
|
|
// JHI: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHI rel8
|
|
// JHI rel32
|
|
// Construct and append a JHI instruction to the active function.
|
|
func (c *Context) JHI(r operand.Op) {
|
|
if inst, err := x86.JHI(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JHI: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHI rel8
|
|
// JHI rel32
|
|
// Construct and append a JHI instruction to the active function.
|
|
// Operates on the global context.
|
|
func JHI(r operand.Op) { ctx.JHI(r) }
|
|
|
|
// JHS: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHS rel8
|
|
// JHS rel32
|
|
// Construct and append a JHS instruction to the active function.
|
|
func (c *Context) JHS(r operand.Op) {
|
|
if inst, err := x86.JHS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JHS: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHS rel8
|
|
// JHS rel32
|
|
// Construct and append a JHS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JHS(r operand.Op) { ctx.JHS(r) }
|
|
|
|
// JL: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JL rel8
|
|
// JL rel32
|
|
// Construct and append a JL instruction to the active function.
|
|
func (c *Context) JL(r operand.Op) {
|
|
if inst, err := x86.JL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JL: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JL rel8
|
|
// JL rel32
|
|
// Construct and append a JL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JL(r operand.Op) { ctx.JL(r) }
|
|
|
|
// JLE: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLE rel8
|
|
// JLE rel32
|
|
// Construct and append a JLE instruction to the active function.
|
|
func (c *Context) JLE(r operand.Op) {
|
|
if inst, err := x86.JLE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JLE: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLE rel8
|
|
// JLE rel32
|
|
// Construct and append a JLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLE(r operand.Op) { ctx.JLE(r) }
|
|
|
|
// JLO: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLO rel8
|
|
// JLO rel32
|
|
// Construct and append a JLO instruction to the active function.
|
|
func (c *Context) JLO(r operand.Op) {
|
|
if inst, err := x86.JLO(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JLO: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLO rel8
|
|
// JLO rel32
|
|
// Construct and append a JLO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLO(r operand.Op) { ctx.JLO(r) }
|
|
|
|
// JLS: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLS rel8
|
|
// JLS rel32
|
|
// Construct and append a JLS instruction to the active function.
|
|
func (c *Context) JLS(r operand.Op) {
|
|
if inst, err := x86.JLS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JLS: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLS rel8
|
|
// JLS rel32
|
|
// Construct and append a JLS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLS(r operand.Op) { ctx.JLS(r) }
|
|
|
|
// JLT: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLT rel8
|
|
// JLT rel32
|
|
// Construct and append a JLT instruction to the active function.
|
|
func (c *Context) JLT(r operand.Op) {
|
|
if inst, err := x86.JLT(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JLT: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLT rel8
|
|
// JLT rel32
|
|
// Construct and append a JLT instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLT(r operand.Op) { ctx.JLT(r) }
|
|
|
|
// JMI: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMI rel8
|
|
// JMI rel32
|
|
// Construct and append a JMI instruction to the active function.
|
|
func (c *Context) JMI(r operand.Op) {
|
|
if inst, err := x86.JMI(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JMI: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMI rel8
|
|
// JMI rel32
|
|
// Construct and append a JMI instruction to the active function.
|
|
// Operates on the global context.
|
|
func JMI(r operand.Op) { ctx.JMI(r) }
|
|
|
|
// JMP: Jump Unconditionally.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMP rel8
|
|
// JMP rel32
|
|
// JMP r64
|
|
// JMP m64
|
|
// Construct and append a JMP instruction to the active function.
|
|
func (c *Context) JMP(mr operand.Op) {
|
|
if inst, err := x86.JMP(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JMP: Jump Unconditionally.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMP rel8
|
|
// JMP rel32
|
|
// JMP r64
|
|
// JMP m64
|
|
// Construct and append a JMP instruction to the active function.
|
|
// Operates on the global context.
|
|
func JMP(mr operand.Op) { ctx.JMP(mr) }
|
|
|
|
// JNA: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNA rel8
|
|
// JNA rel32
|
|
// Construct and append a JNA instruction to the active function.
|
|
func (c *Context) JNA(r operand.Op) {
|
|
if inst, err := x86.JNA(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNA: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNA rel8
|
|
// JNA rel32
|
|
// Construct and append a JNA instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNA(r operand.Op) { ctx.JNA(r) }
|
|
|
|
// JNAE: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNAE rel8
|
|
// JNAE rel32
|
|
// Construct and append a JNAE instruction to the active function.
|
|
func (c *Context) JNAE(r operand.Op) {
|
|
if inst, err := x86.JNAE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNAE: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNAE rel8
|
|
// JNAE rel32
|
|
// Construct and append a JNAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNAE(r operand.Op) { ctx.JNAE(r) }
|
|
|
|
// JNB: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNB rel8
|
|
// JNB rel32
|
|
// Construct and append a JNB instruction to the active function.
|
|
func (c *Context) JNB(r operand.Op) {
|
|
if inst, err := x86.JNB(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNB: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNB rel8
|
|
// JNB rel32
|
|
// Construct and append a JNB instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNB(r operand.Op) { ctx.JNB(r) }
|
|
|
|
// JNBE: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNBE rel8
|
|
// JNBE rel32
|
|
// Construct and append a JNBE instruction to the active function.
|
|
func (c *Context) JNBE(r operand.Op) {
|
|
if inst, err := x86.JNBE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNBE: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNBE rel8
|
|
// JNBE rel32
|
|
// Construct and append a JNBE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNBE(r operand.Op) { ctx.JNBE(r) }
|
|
|
|
// JNC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNC rel8
|
|
// JNC rel32
|
|
// Construct and append a JNC instruction to the active function.
|
|
func (c *Context) JNC(r operand.Op) {
|
|
if inst, err := x86.JNC(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNC rel8
|
|
// JNC rel32
|
|
// Construct and append a JNC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNC(r operand.Op) { ctx.JNC(r) }
|
|
|
|
// JNE: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNE rel8
|
|
// JNE rel32
|
|
// Construct and append a JNE instruction to the active function.
|
|
func (c *Context) JNE(r operand.Op) {
|
|
if inst, err := x86.JNE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNE: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNE rel8
|
|
// JNE rel32
|
|
// Construct and append a JNE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNE(r operand.Op) { ctx.JNE(r) }
|
|
|
|
// JNG: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNG rel8
|
|
// JNG rel32
|
|
// Construct and append a JNG instruction to the active function.
|
|
func (c *Context) JNG(r operand.Op) {
|
|
if inst, err := x86.JNG(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNG: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNG rel8
|
|
// JNG rel32
|
|
// Construct and append a JNG instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNG(r operand.Op) { ctx.JNG(r) }
|
|
|
|
// JNGE: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNGE rel8
|
|
// JNGE rel32
|
|
// Construct and append a JNGE instruction to the active function.
|
|
func (c *Context) JNGE(r operand.Op) {
|
|
if inst, err := x86.JNGE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNGE: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNGE rel8
|
|
// JNGE rel32
|
|
// Construct and append a JNGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNGE(r operand.Op) { ctx.JNGE(r) }
|
|
|
|
// JNL: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNL rel8
|
|
// JNL rel32
|
|
// Construct and append a JNL instruction to the active function.
|
|
func (c *Context) JNL(r operand.Op) {
|
|
if inst, err := x86.JNL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNL: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNL rel8
|
|
// JNL rel32
|
|
// Construct and append a JNL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNL(r operand.Op) { ctx.JNL(r) }
|
|
|
|
// JNLE: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNLE rel8
|
|
// JNLE rel32
|
|
// Construct and append a JNLE instruction to the active function.
|
|
func (c *Context) JNLE(r operand.Op) {
|
|
if inst, err := x86.JNLE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNLE: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNLE rel8
|
|
// JNLE rel32
|
|
// Construct and append a JNLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNLE(r operand.Op) { ctx.JNLE(r) }
|
|
|
|
// JNO: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNO rel8
|
|
// JNO rel32
|
|
// Construct and append a JNO instruction to the active function.
|
|
func (c *Context) JNO(r operand.Op) {
|
|
if inst, err := x86.JNO(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNO: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNO rel8
|
|
// JNO rel32
|
|
// Construct and append a JNO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNO(r operand.Op) { ctx.JNO(r) }
|
|
|
|
// JNP: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNP rel8
|
|
// JNP rel32
|
|
// Construct and append a JNP instruction to the active function.
|
|
func (c *Context) JNP(r operand.Op) {
|
|
if inst, err := x86.JNP(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNP: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNP rel8
|
|
// JNP rel32
|
|
// Construct and append a JNP instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNP(r operand.Op) { ctx.JNP(r) }
|
|
|
|
// JNS: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNS rel8
|
|
// JNS rel32
|
|
// Construct and append a JNS instruction to the active function.
|
|
func (c *Context) JNS(r operand.Op) {
|
|
if inst, err := x86.JNS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNS: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNS rel8
|
|
// JNS rel32
|
|
// Construct and append a JNS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNS(r operand.Op) { ctx.JNS(r) }
|
|
|
|
// JNZ: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNZ rel8
|
|
// JNZ rel32
|
|
// Construct and append a JNZ instruction to the active function.
|
|
func (c *Context) JNZ(r operand.Op) {
|
|
if inst, err := x86.JNZ(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JNZ: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNZ rel8
|
|
// JNZ rel32
|
|
// Construct and append a JNZ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNZ(r operand.Op) { ctx.JNZ(r) }
|
|
|
|
// JO: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JO rel8
|
|
// JO rel32
|
|
// Construct and append a JO instruction to the active function.
|
|
func (c *Context) JO(r operand.Op) {
|
|
if inst, err := x86.JO(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JO: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JO rel8
|
|
// JO rel32
|
|
// Construct and append a JO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JO(r operand.Op) { ctx.JO(r) }
|
|
|
|
// JOC: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOC rel8
|
|
// JOC rel32
|
|
// Construct and append a JOC instruction to the active function.
|
|
func (c *Context) JOC(r operand.Op) {
|
|
if inst, err := x86.JOC(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JOC: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOC rel8
|
|
// JOC rel32
|
|
// Construct and append a JOC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JOC(r operand.Op) { ctx.JOC(r) }
|
|
|
|
// JOS: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOS rel8
|
|
// JOS rel32
|
|
// Construct and append a JOS instruction to the active function.
|
|
func (c *Context) JOS(r operand.Op) {
|
|
if inst, err := x86.JOS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JOS: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOS rel8
|
|
// JOS rel32
|
|
// Construct and append a JOS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JOS(r operand.Op) { ctx.JOS(r) }
|
|
|
|
// JP: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JP rel8
|
|
// JP rel32
|
|
// Construct and append a JP instruction to the active function.
|
|
func (c *Context) JP(r operand.Op) {
|
|
if inst, err := x86.JP(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JP: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JP rel8
|
|
// JP rel32
|
|
// Construct and append a JP instruction to the active function.
|
|
// Operates on the global context.
|
|
func JP(r operand.Op) { ctx.JP(r) }
|
|
|
|
// JPC: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPC rel8
|
|
// JPC rel32
|
|
// Construct and append a JPC instruction to the active function.
|
|
func (c *Context) JPC(r operand.Op) {
|
|
if inst, err := x86.JPC(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JPC: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPC rel8
|
|
// JPC rel32
|
|
// Construct and append a JPC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPC(r operand.Op) { ctx.JPC(r) }
|
|
|
|
// JPE: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPE rel8
|
|
// JPE rel32
|
|
// Construct and append a JPE instruction to the active function.
|
|
func (c *Context) JPE(r operand.Op) {
|
|
if inst, err := x86.JPE(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JPE: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPE rel8
|
|
// JPE rel32
|
|
// Construct and append a JPE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPE(r operand.Op) { ctx.JPE(r) }
|
|
|
|
// JPL: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPL rel8
|
|
// JPL rel32
|
|
// Construct and append a JPL instruction to the active function.
|
|
func (c *Context) JPL(r operand.Op) {
|
|
if inst, err := x86.JPL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JPL: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPL rel8
|
|
// JPL rel32
|
|
// Construct and append a JPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPL(r operand.Op) { ctx.JPL(r) }
|
|
|
|
// JPO: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPO rel8
|
|
// JPO rel32
|
|
// Construct and append a JPO instruction to the active function.
|
|
func (c *Context) JPO(r operand.Op) {
|
|
if inst, err := x86.JPO(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JPO: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPO rel8
|
|
// JPO rel32
|
|
// Construct and append a JPO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPO(r operand.Op) { ctx.JPO(r) }
|
|
|
|
// JPS: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPS rel8
|
|
// JPS rel32
|
|
// Construct and append a JPS instruction to the active function.
|
|
func (c *Context) JPS(r operand.Op) {
|
|
if inst, err := x86.JPS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JPS: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPS rel8
|
|
// JPS rel32
|
|
// Construct and append a JPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPS(r operand.Op) { ctx.JPS(r) }
|
|
|
|
// JS: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JS rel8
|
|
// JS rel32
|
|
// Construct and append a JS instruction to the active function.
|
|
func (c *Context) JS(r operand.Op) {
|
|
if inst, err := x86.JS(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JS: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JS rel8
|
|
// JS rel32
|
|
// Construct and append a JS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JS(r operand.Op) { ctx.JS(r) }
|
|
|
|
// JZ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JZ rel8
|
|
// JZ rel32
|
|
// Construct and append a JZ instruction to the active function.
|
|
func (c *Context) JZ(r operand.Op) {
|
|
if inst, err := x86.JZ(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// JZ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JZ rel8
|
|
// JZ rel32
|
|
// Construct and append a JZ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JZ(r operand.Op) { ctx.JZ(r) }
|
|
|
|
// LDDQU: Load Unaligned Integer 128 Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDDQU m128 xmm
|
|
// Construct and append a LDDQU instruction to the active function.
|
|
func (c *Context) LDDQU(m, x operand.Op) {
|
|
if inst, err := x86.LDDQU(m, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LDDQU: Load Unaligned Integer 128 Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDDQU m128 xmm
|
|
// Construct and append a LDDQU instruction to the active function.
|
|
// Operates on the global context.
|
|
func LDDQU(m, x operand.Op) { ctx.LDDQU(m, x) }
|
|
|
|
// LDMXCSR: Load MXCSR Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDMXCSR m32
|
|
// Construct and append a LDMXCSR instruction to the active function.
|
|
func (c *Context) LDMXCSR(m operand.Op) {
|
|
if inst, err := x86.LDMXCSR(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LDMXCSR: Load MXCSR Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDMXCSR m32
|
|
// Construct and append a LDMXCSR instruction to the active function.
|
|
// Operates on the global context.
|
|
func LDMXCSR(m operand.Op) { ctx.LDMXCSR(m) }
|
|
|
|
// LEAL: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAL m r32
|
|
// Construct and append a LEAL instruction to the active function.
|
|
func (c *Context) LEAL(m, r operand.Op) {
|
|
if inst, err := x86.LEAL(m, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LEAL: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAL m r32
|
|
// Construct and append a LEAL instruction to the active function.
|
|
// Operates on the global context.
|
|
func LEAL(m, r operand.Op) { ctx.LEAL(m, r) }
|
|
|
|
// LEAQ: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAQ m r64
|
|
// Construct and append a LEAQ instruction to the active function.
|
|
func (c *Context) LEAQ(m, r operand.Op) {
|
|
if inst, err := x86.LEAQ(m, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LEAQ: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAQ m r64
|
|
// Construct and append a LEAQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func LEAQ(m, r operand.Op) { ctx.LEAQ(m, r) }
|
|
|
|
// LEAW: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAW m r16
|
|
// Construct and append a LEAW instruction to the active function.
|
|
func (c *Context) LEAW(m, r operand.Op) {
|
|
if inst, err := x86.LEAW(m, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LEAW: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAW m r16
|
|
// Construct and append a LEAW instruction to the active function.
|
|
// Operates on the global context.
|
|
func LEAW(m, r operand.Op) { ctx.LEAW(m, r) }
|
|
|
|
// LFENCE: Load Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LFENCE
|
|
// Construct and append a LFENCE instruction to the active function.
|
|
func (c *Context) LFENCE() {
|
|
if inst, err := x86.LFENCE(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LFENCE: Load Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LFENCE
|
|
// Construct and append a LFENCE instruction to the active function.
|
|
// Operates on the global context.
|
|
func LFENCE() { ctx.LFENCE() }
|
|
|
|
// LZCNTL: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTL r32 r32
|
|
// LZCNTL m32 r32
|
|
// Construct and append a LZCNTL instruction to the active function.
|
|
func (c *Context) LZCNTL(mr, r operand.Op) {
|
|
if inst, err := x86.LZCNTL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LZCNTL: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTL r32 r32
|
|
// LZCNTL m32 r32
|
|
// Construct and append a LZCNTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func LZCNTL(mr, r operand.Op) { ctx.LZCNTL(mr, r) }
|
|
|
|
// LZCNTQ: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTQ r64 r64
|
|
// LZCNTQ m64 r64
|
|
// Construct and append a LZCNTQ instruction to the active function.
|
|
func (c *Context) LZCNTQ(mr, r operand.Op) {
|
|
if inst, err := x86.LZCNTQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}

// LZCNTQ: Count the Number of Leading Zero Bits.
//
// Forms:
//
// LZCNTQ r64 r64
// LZCNTQ m64 r64
// Construct and append a LZCNTQ instruction to the active function.
// Operates on the global context.
func LZCNTQ(mr, r operand.Op) { ctx.LZCNTQ(mr, r) }
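
// A minimal usage sketch, not generated content: a complete avo function that
// counts leading zeros of a uint64, assuming the usual generator pattern with
// dot imports of the build and operand packages. Requires a CPU with LZCNT
// support.
//
//    TEXT("LeadingZeros64", NOSPLIT, "func(x uint64) uint64")
//    x := Load(Param("x"), GP64())
//    n := GP64()
//    LZCNTQ(x, n)
//    Store(n, ReturnIndex(0))
//    RET()
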
// LZCNTW: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTW r16 r16
|
|
// LZCNTW m16 r16
|
|
// Construct and append a LZCNTW instruction to the active function.
|
|
func (c *Context) LZCNTW(mr, r operand.Op) {
|
|
if inst, err := x86.LZCNTW(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// LZCNTW: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTW r16 r16
|
|
// LZCNTW m16 r16
|
|
// Construct and append a LZCNTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func LZCNTW(mr, r operand.Op) { ctx.LZCNTW(mr, r) }
|
|
|
|
// MASKMOVDQU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVDQU xmm xmm
|
|
// Construct and append a MASKMOVDQU instruction to the active function.
|
|
func (c *Context) MASKMOVDQU(x, x1 operand.Op) {
|
|
if inst, err := x86.MASKMOVDQU(x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MASKMOVDQU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVDQU xmm xmm
|
|
// Construct and append a MASKMOVDQU instruction to the active function.
|
|
// Operates on the global context.
|
|
func MASKMOVDQU(x, x1 operand.Op) { ctx.MASKMOVDQU(x, x1) }
|
|
|
|
// MASKMOVOU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVOU xmm xmm
|
|
// Construct and append a MASKMOVOU instruction to the active function.
|
|
func (c *Context) MASKMOVOU(x, x1 operand.Op) {
|
|
if inst, err := x86.MASKMOVOU(x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MASKMOVOU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVOU xmm xmm
|
|
// Construct and append a MASKMOVOU instruction to the active function.
|
|
// Operates on the global context.
|
|
func MASKMOVOU(x, x1 operand.Op) { ctx.MASKMOVOU(x, x1) }
|
|
|
|
// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPD xmm xmm
|
|
// MAXPD m128 xmm
|
|
// Construct and append a MAXPD instruction to the active function.
|
|
func (c *Context) MAXPD(mx, x operand.Op) {
|
|
if inst, err := x86.MAXPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPD xmm xmm
|
|
// MAXPD m128 xmm
|
|
// Construct and append a MAXPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXPD(mx, x operand.Op) { ctx.MAXPD(mx, x) }
|
|
|
|
// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPS xmm xmm
|
|
// MAXPS m128 xmm
|
|
// Construct and append a MAXPS instruction to the active function.
|
|
func (c *Context) MAXPS(mx, x operand.Op) {
|
|
if inst, err := x86.MAXPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPS xmm xmm
|
|
// MAXPS m128 xmm
|
|
// Construct and append a MAXPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXPS(mx, x operand.Op) { ctx.MAXPS(mx, x) }
|
|
|
|
// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSD xmm xmm
|
|
// MAXSD m64 xmm
|
|
// Construct and append a MAXSD instruction to the active function.
|
|
func (c *Context) MAXSD(mx, x operand.Op) {
|
|
if inst, err := x86.MAXSD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSD xmm xmm
|
|
// MAXSD m64 xmm
|
|
// Construct and append a MAXSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXSD(mx, x operand.Op) { ctx.MAXSD(mx, x) }
|
|
|
|
// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSS xmm xmm
|
|
// MAXSS m32 xmm
|
|
// Construct and append a MAXSS instruction to the active function.
|
|
func (c *Context) MAXSS(mx, x operand.Op) {
|
|
if inst, err := x86.MAXSS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSS xmm xmm
|
|
// MAXSS m32 xmm
|
|
// Construct and append a MAXSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXSS(mx, x operand.Op) { ctx.MAXSS(mx, x) }
|
|
|
|
// MFENCE: Memory Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MFENCE
|
|
// Construct and append a MFENCE instruction to the active function.
|
|
func (c *Context) MFENCE() {
|
|
if inst, err := x86.MFENCE(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MFENCE: Memory Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MFENCE
|
|
// Construct and append a MFENCE instruction to the active function.
|
|
// Operates on the global context.
|
|
func MFENCE() { ctx.MFENCE() }
|
|
|
|
// MINPD: Return Minimum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPD xmm xmm
|
|
// MINPD m128 xmm
|
|
// Construct and append a MINPD instruction to the active function.
|
|
func (c *Context) MINPD(mx, x operand.Op) {
|
|
if inst, err := x86.MINPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MINPD: Return Minimum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPD xmm xmm
|
|
// MINPD m128 xmm
|
|
// Construct and append a MINPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINPD(mx, x operand.Op) { ctx.MINPD(mx, x) }
|
|
|
|
// MINPS: Return Minimum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPS xmm xmm
|
|
// MINPS m128 xmm
|
|
// Construct and append a MINPS instruction to the active function.
|
|
func (c *Context) MINPS(mx, x operand.Op) {
|
|
if inst, err := x86.MINPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MINPS: Return Minimum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPS xmm xmm
|
|
// MINPS m128 xmm
|
|
// Construct and append a MINPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINPS(mx, x operand.Op) { ctx.MINPS(mx, x) }
|
|
|
|
// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSD xmm xmm
|
|
// MINSD m64 xmm
|
|
// Construct and append a MINSD instruction to the active function.
|
|
func (c *Context) MINSD(mx, x operand.Op) {
|
|
if inst, err := x86.MINSD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}

// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// MINSD xmm xmm
// MINSD m64 xmm
// Construct and append a MINSD instruction to the active function.
// Operates on the global context.
func MINSD(mx, x operand.Op) { ctx.MINSD(mx, x) }
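
// A minimal usage sketch, not generated content: MAXSD followed by MINSD clamps
// a scalar double to [lo, hi] without branches (the destination is the last
// operand, as in the forms above). Names here are illustrative.
//
//    x := Load(Param("x"), XMM())
//    lo := Load(Param("lo"), XMM())
//    hi := Load(Param("hi"), XMM())
//    MAXSD(lo, x)  // x = max(x, lo)
//    MINSD(hi, x)  // x = min(x, hi)
//    Store(x, ReturnIndex(0))
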
// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSS xmm xmm
|
|
// MINSS m32 xmm
|
|
// Construct and append a MINSS instruction to the active function.
|
|
func (c *Context) MINSS(mx, x operand.Op) {
|
|
if inst, err := x86.MINSS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSS xmm xmm
|
|
// MINSS m32 xmm
|
|
// Construct and append a MINSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINSS(mx, x operand.Op) { ctx.MINSS(mx, x) }
|
|
|
|
// MONITOR: Monitor a Linear Address Range.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MONITOR
|
|
// Construct and append a MONITOR instruction to the active function.
|
|
func (c *Context) MONITOR() {
|
|
if inst, err := x86.MONITOR(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MONITOR: Monitor a Linear Address Range.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MONITOR
|
|
// Construct and append a MONITOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func MONITOR() { ctx.MONITOR() }
|
|
|
|
// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPD xmm xmm
|
|
// MOVAPD m128 xmm
|
|
// MOVAPD xmm m128
|
|
// Construct and append a MOVAPD instruction to the active function.
|
|
func (c *Context) MOVAPD(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVAPD(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPD xmm xmm
|
|
// MOVAPD m128 xmm
|
|
// MOVAPD xmm m128
|
|
// Construct and append a MOVAPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVAPD(mx, mx1 operand.Op) { ctx.MOVAPD(mx, mx1) }
|
|
|
|
// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPS xmm xmm
|
|
// MOVAPS m128 xmm
|
|
// MOVAPS xmm m128
|
|
// Construct and append a MOVAPS instruction to the active function.
|
|
func (c *Context) MOVAPS(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVAPS(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPS xmm xmm
|
|
// MOVAPS m128 xmm
|
|
// MOVAPS xmm m128
|
|
// Construct and append a MOVAPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVAPS(mx, mx1 operand.Op) { ctx.MOVAPS(mx, mx1) }
|
|
|
|
// MOVB: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVB imm8 r8
|
|
// MOVB r8 r8
|
|
// MOVB m8 r8
|
|
// MOVB imm8 m8
|
|
// MOVB r8 m8
|
|
// Construct and append a MOVB instruction to the active function.
|
|
func (c *Context) MOVB(imr, mr operand.Op) {
|
|
if inst, err := x86.MOVB(imr, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVB: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVB imm8 r8
|
|
// MOVB r8 r8
|
|
// MOVB m8 r8
|
|
// MOVB imm8 m8
|
|
// MOVB r8 m8
|
|
// Construct and append a MOVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVB(imr, mr operand.Op) { ctx.MOVB(imr, mr) }
|
|
|
|
// MOVBELL: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBELL m32 r32
|
|
// MOVBELL r32 m32
|
|
// Construct and append a MOVBELL instruction to the active function.
|
|
func (c *Context) MOVBELL(mr, mr1 operand.Op) {
|
|
if inst, err := x86.MOVBELL(mr, mr1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBELL: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBELL m32 r32
|
|
// MOVBELL r32 m32
|
|
// Construct and append a MOVBELL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBELL(mr, mr1 operand.Op) { ctx.MOVBELL(mr, mr1) }
|
|
|
|
// MOVBEQQ: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEQQ m64 r64
|
|
// MOVBEQQ r64 m64
|
|
// Construct and append a MOVBEQQ instruction to the active function.
|
|
func (c *Context) MOVBEQQ(mr, mr1 operand.Op) {
|
|
if inst, err := x86.MOVBEQQ(mr, mr1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBEQQ: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEQQ m64 r64
|
|
// MOVBEQQ r64 m64
|
|
// Construct and append a MOVBEQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBEQQ(mr, mr1 operand.Op) { ctx.MOVBEQQ(mr, mr1) }
|
|
|
|
// MOVBEWW: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEWW m16 r16
|
|
// MOVBEWW r16 m16
|
|
// Construct and append a MOVBEWW instruction to the active function.
|
|
func (c *Context) MOVBEWW(mr, mr1 operand.Op) {
|
|
if inst, err := x86.MOVBEWW(mr, mr1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBEWW: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEWW m16 r16
|
|
// MOVBEWW r16 m16
|
|
// Construct and append a MOVBEWW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBEWW(mr, mr1 operand.Op) { ctx.MOVBEWW(mr, mr1) }
|
|
|
|
// MOVBLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLSX r8 r32
|
|
// MOVBLSX m8 r32
|
|
// Construct and append a MOVBLSX instruction to the active function.
|
|
func (c *Context) MOVBLSX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVBLSX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLSX r8 r32
|
|
// MOVBLSX m8 r32
|
|
// Construct and append a MOVBLSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBLSX(mr, r operand.Op) { ctx.MOVBLSX(mr, r) }
|
|
|
|
// MOVBLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLZX r8 r32
|
|
// MOVBLZX m8 r32
|
|
// Construct and append a MOVBLZX instruction to the active function.
|
|
func (c *Context) MOVBLZX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVBLZX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLZX r8 r32
|
|
// MOVBLZX m8 r32
|
|
// Construct and append a MOVBLZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBLZX(mr, r operand.Op) { ctx.MOVBLZX(mr, r) }
|
|
|
|
// MOVBQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQSX r8 r64
|
|
// MOVBQSX m8 r64
|
|
// Construct and append a MOVBQSX instruction to the active function.
|
|
func (c *Context) MOVBQSX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVBQSX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQSX r8 r64
|
|
// MOVBQSX m8 r64
|
|
// Construct and append a MOVBQSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBQSX(mr, r operand.Op) { ctx.MOVBQSX(mr, r) }
|
|
|
|
// MOVBQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQZX r8 r64
|
|
// MOVBQZX m8 r64
|
|
// Construct and append a MOVBQZX instruction to the active function.
|
|
func (c *Context) MOVBQZX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVBQZX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQZX r8 r64
|
|
// MOVBQZX m8 r64
|
|
// Construct and append a MOVBQZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBQZX(mr, r operand.Op) { ctx.MOVBQZX(mr, r) }
|
|
|
|
// MOVBWSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWSX r8 r16
|
|
// MOVBWSX m8 r16
|
|
// Construct and append a MOVBWSX instruction to the active function.
|
|
func (c *Context) MOVBWSX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVBWSX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBWSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWSX r8 r16
|
|
// MOVBWSX m8 r16
|
|
// Construct and append a MOVBWSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBWSX(mr, r operand.Op) { ctx.MOVBWSX(mr, r) }
|
|
|
|
// MOVBWZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWZX r8 r16
|
|
// MOVBWZX m8 r16
|
|
// Construct and append a MOVBWZX instruction to the active function.
|
|
func (c *Context) MOVBWZX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVBWZX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVBWZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWZX r8 r16
|
|
// MOVBWZX m8 r16
|
|
// Construct and append a MOVBWZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBWZX(mr, r operand.Op) { ctx.MOVBWZX(mr, r) }
|
|
|
|
// MOVD: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVD imm32 r64
|
|
// MOVD imm64 r64
|
|
// MOVD r64 r64
|
|
// MOVD m64 r64
|
|
// MOVD imm32 m64
|
|
// MOVD r64 m64
|
|
// MOVD xmm r64
|
|
// MOVD r64 xmm
|
|
// MOVD xmm xmm
|
|
// MOVD m64 xmm
|
|
// MOVD xmm m64
|
|
// MOVD xmm r32
|
|
// MOVD r32 xmm
|
|
// MOVD m32 xmm
|
|
// MOVD xmm m32
|
|
// Construct and append a MOVD instruction to the active function.
|
|
func (c *Context) MOVD(imrx, mrx operand.Op) {
|
|
if inst, err := x86.MOVD(imrx, mrx); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVD: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVD imm32 r64
|
|
// MOVD imm64 r64
|
|
// MOVD r64 r64
|
|
// MOVD m64 r64
|
|
// MOVD imm32 m64
|
|
// MOVD r64 m64
|
|
// MOVD xmm r64
|
|
// MOVD r64 xmm
|
|
// MOVD xmm xmm
|
|
// MOVD m64 xmm
|
|
// MOVD xmm m64
|
|
// MOVD xmm r32
|
|
// MOVD r32 xmm
|
|
// MOVD m32 xmm
|
|
// MOVD xmm m32
|
|
// Construct and append a MOVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVD(imrx, mrx operand.Op) { ctx.MOVD(imrx, mrx) }
|
|
|
|
// MOVDDUP: Move One Double-FP and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDDUP xmm xmm
|
|
// MOVDDUP m64 xmm
|
|
// Construct and append a MOVDDUP instruction to the active function.
|
|
func (c *Context) MOVDDUP(mx, x operand.Op) {
|
|
if inst, err := x86.MOVDDUP(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVDDUP: Move One Double-FP and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDDUP xmm xmm
|
|
// MOVDDUP m64 xmm
|
|
// Construct and append a MOVDDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVDDUP(mx, x operand.Op) { ctx.MOVDDUP(mx, x) }
|
|
|
|
// MOVDQ2Q: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDQ2Q imm32 r64
|
|
// MOVDQ2Q imm64 r64
|
|
// MOVDQ2Q r64 r64
|
|
// MOVDQ2Q m64 r64
|
|
// MOVDQ2Q imm32 m64
|
|
// MOVDQ2Q r64 m64
|
|
// MOVDQ2Q xmm r64
|
|
// MOVDQ2Q r64 xmm
|
|
// MOVDQ2Q xmm xmm
|
|
// MOVDQ2Q m64 xmm
|
|
// MOVDQ2Q xmm m64
|
|
// MOVDQ2Q xmm r32
|
|
// MOVDQ2Q r32 xmm
|
|
// MOVDQ2Q m32 xmm
|
|
// MOVDQ2Q xmm m32
|
|
// Construct and append a MOVDQ2Q instruction to the active function.
|
|
func (c *Context) MOVDQ2Q(imrx, mrx operand.Op) {
|
|
if inst, err := x86.MOVDQ2Q(imrx, mrx); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVDQ2Q: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDQ2Q imm32 r64
|
|
// MOVDQ2Q imm64 r64
|
|
// MOVDQ2Q r64 r64
|
|
// MOVDQ2Q m64 r64
|
|
// MOVDQ2Q imm32 m64
|
|
// MOVDQ2Q r64 m64
|
|
// MOVDQ2Q xmm r64
|
|
// MOVDQ2Q r64 xmm
|
|
// MOVDQ2Q xmm xmm
|
|
// MOVDQ2Q m64 xmm
|
|
// MOVDQ2Q xmm m64
|
|
// MOVDQ2Q xmm r32
|
|
// MOVDQ2Q r32 xmm
|
|
// MOVDQ2Q m32 xmm
|
|
// MOVDQ2Q xmm m32
|
|
// Construct and append a MOVDQ2Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVDQ2Q(imrx, mrx operand.Op) { ctx.MOVDQ2Q(imrx, mrx) }
|
|
|
|
// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHLPS xmm xmm
|
|
// Construct and append a MOVHLPS instruction to the active function.
|
|
func (c *Context) MOVHLPS(x, x1 operand.Op) {
|
|
if inst, err := x86.MOVHLPS(x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHLPS xmm xmm
|
|
// Construct and append a MOVHLPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVHLPS(x, x1 operand.Op) { ctx.MOVHLPS(x, x1) }
|
|
|
|
// MOVHPD: Move High Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPD m64 xmm
|
|
// MOVHPD xmm m64
|
|
// Construct and append a MOVHPD instruction to the active function.
|
|
func (c *Context) MOVHPD(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVHPD(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVHPD: Move High Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPD m64 xmm
|
|
// MOVHPD xmm m64
|
|
// Construct and append a MOVHPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVHPD(mx, mx1 operand.Op) { ctx.MOVHPD(mx, mx1) }
|
|
|
|
// MOVHPS: Move High Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPS m64 xmm
|
|
// MOVHPS xmm m64
|
|
// Construct and append a MOVHPS instruction to the active function.
|
|
func (c *Context) MOVHPS(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVHPS(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVHPS: Move High Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPS m64 xmm
|
|
// MOVHPS xmm m64
|
|
// Construct and append a MOVHPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVHPS(mx, mx1 operand.Op) { ctx.MOVHPS(mx, mx1) }
|
|
|
|
// MOVL: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVL imm32 r32
|
|
// MOVL r32 r32
|
|
// MOVL m32 r32
|
|
// MOVL imm32 m32
|
|
// MOVL r32 m32
|
|
// Construct and append a MOVL instruction to the active function.
|
|
func (c *Context) MOVL(imr, mr operand.Op) {
|
|
if inst, err := x86.MOVL(imr, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVL: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVL imm32 r32
|
|
// MOVL r32 r32
|
|
// MOVL m32 r32
|
|
// MOVL imm32 m32
|
|
// MOVL r32 m32
|
|
// Construct and append a MOVL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVL(imr, mr operand.Op) { ctx.MOVL(imr, mr) }
|
|
|
|
// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLHPS xmm xmm
|
|
// Construct and append a MOVLHPS instruction to the active function.
|
|
func (c *Context) MOVLHPS(x, x1 operand.Op) {
|
|
if inst, err := x86.MOVLHPS(x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLHPS xmm xmm
|
|
// Construct and append a MOVLHPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLHPS(x, x1 operand.Op) { ctx.MOVLHPS(x, x1) }
|
|
|
|
// MOVLPD: Move Low Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPD m64 xmm
|
|
// MOVLPD xmm m64
|
|
// Construct and append a MOVLPD instruction to the active function.
|
|
func (c *Context) MOVLPD(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVLPD(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVLPD: Move Low Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPD m64 xmm
|
|
// MOVLPD xmm m64
|
|
// Construct and append a MOVLPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLPD(mx, mx1 operand.Op) { ctx.MOVLPD(mx, mx1) }
|
|
|
|
// MOVLPS: Move Low Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPS m64 xmm
|
|
// MOVLPS xmm m64
|
|
// Construct and append a MOVLPS instruction to the active function.
|
|
func (c *Context) MOVLPS(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVLPS(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVLPS: Move Low Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPS m64 xmm
|
|
// MOVLPS xmm m64
|
|
// Construct and append a MOVLPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLPS(mx, mx1 operand.Op) { ctx.MOVLPS(mx, mx1) }
|
|
|
|
// MOVLQSX: Move Doubleword to Quadword with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQSX r32 r64
|
|
// MOVLQSX m32 r64
|
|
// Construct and append a MOVLQSX instruction to the active function.
|
|
func (c *Context) MOVLQSX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVLQSX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVLQSX: Move Doubleword to Quadword with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQSX r32 r64
|
|
// MOVLQSX m32 r64
|
|
// Construct and append a MOVLQSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLQSX(mr, r operand.Op) { ctx.MOVLQSX(mr, r) }
|
|
|
|
// MOVLQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQZX m32 r64
|
|
// Construct and append a MOVLQZX instruction to the active function.
|
|
func (c *Context) MOVLQZX(m, r operand.Op) {
|
|
if inst, err := x86.MOVLQZX(m, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVLQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQZX m32 r64
|
|
// Construct and append a MOVLQZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLQZX(m, r operand.Op) { ctx.MOVLQZX(m, r) }
|
|
|
|
// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPD xmm r32
|
|
// Construct and append a MOVMSKPD instruction to the active function.
|
|
func (c *Context) MOVMSKPD(x, r operand.Op) {
|
|
if inst, err := x86.MOVMSKPD(x, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPD xmm r32
|
|
// Construct and append a MOVMSKPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVMSKPD(x, r operand.Op) { ctx.MOVMSKPD(x, r) }
|
|
|
|
// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPS xmm r32
|
|
// Construct and append a MOVMSKPS instruction to the active function.
|
|
func (c *Context) MOVMSKPS(x, r operand.Op) {
|
|
if inst, err := x86.MOVMSKPS(x, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPS xmm r32
|
|
// Construct and append a MOVMSKPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVMSKPS(x, r operand.Op) { ctx.MOVMSKPS(x, r) }
|
|
|
|
// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQ xmm m128
|
|
// Construct and append a MOVNTDQ instruction to the active function.
|
|
func (c *Context) MOVNTDQ(x, m operand.Op) {
|
|
if inst, err := x86.MOVNTDQ(x, m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQ xmm m128
|
|
// Construct and append a MOVNTDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTDQ(x, m operand.Op) { ctx.MOVNTDQ(x, m) }
|
|
|
|
// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQA m128 xmm
|
|
// Construct and append a MOVNTDQA instruction to the active function.
|
|
func (c *Context) MOVNTDQA(m, x operand.Op) {
|
|
if inst, err := x86.MOVNTDQA(m, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQA m128 xmm
|
|
// Construct and append a MOVNTDQA instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTDQA(m, x operand.Op) { ctx.MOVNTDQA(m, x) }
|
|
|
|
// MOVNTIL: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIL r32 m32
|
|
// Construct and append a MOVNTIL instruction to the active function.
|
|
func (c *Context) MOVNTIL(r, m operand.Op) {
|
|
if inst, err := x86.MOVNTIL(r, m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVNTIL: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIL r32 m32
|
|
// Construct and append a MOVNTIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTIL(r, m operand.Op) { ctx.MOVNTIL(r, m) }
|
|
|
|
// MOVNTIQ: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIQ r64 m64
|
|
// Construct and append a MOVNTIQ instruction to the active function.
|
|
func (c *Context) MOVNTIQ(r, m operand.Op) {
|
|
if inst, err := x86.MOVNTIQ(r, m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVNTIQ: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIQ r64 m64
|
|
// Construct and append a MOVNTIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTIQ(r, m operand.Op) { ctx.MOVNTIQ(r, m) }
|
|
|
|
// MOVNTO: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTO xmm m128
|
|
// Construct and append a MOVNTO instruction to the active function.
|
|
func (c *Context) MOVNTO(x, m operand.Op) {
|
|
if inst, err := x86.MOVNTO(x, m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVNTO: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTO xmm m128
|
|
// Construct and append a MOVNTO instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTO(x, m operand.Op) { ctx.MOVNTO(x, m) }
|
|
|
|
// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPD xmm m128
|
|
// Construct and append a MOVNTPD instruction to the active function.
|
|
func (c *Context) MOVNTPD(x, m operand.Op) {
|
|
if inst, err := x86.MOVNTPD(x, m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPD xmm m128
|
|
// Construct and append a MOVNTPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTPD(x, m operand.Op) { ctx.MOVNTPD(x, m) }
|
|
|
|
// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPS xmm m128
|
|
// Construct and append a MOVNTPS instruction to the active function.
|
|
func (c *Context) MOVNTPS(x, m operand.Op) {
|
|
if inst, err := x86.MOVNTPS(x, m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}

// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
// MOVNTPS xmm m128
// Construct and append a MOVNTPS instruction to the active function.
// Operates on the global context.
func MOVNTPS(x, m operand.Op) { ctx.MOVNTPS(x, m) }
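
// A minimal usage sketch, not generated content: non-temporal stores bypass the
// cache, so a streaming copy typically ends with a fence (MFENCE, defined above,
// is sufficient). MOVNTPS requires a 16-byte aligned destination; names here
// are illustrative.
//
//    v := XMM()
//    MOVUPS(Mem{Base: src}, v)
//    MOVNTPS(v, Mem{Base: dst})
//    MFENCE()
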
// MOVO: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVO xmm xmm
|
|
// MOVO m128 xmm
|
|
// MOVO xmm m128
|
|
// Construct and append a MOVO instruction to the active function.
|
|
func (c *Context) MOVO(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVO(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVO: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVO xmm xmm
|
|
// MOVO m128 xmm
|
|
// MOVO xmm m128
|
|
// Construct and append a MOVO instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVO(mx, mx1 operand.Op) { ctx.MOVO(mx, mx1) }
|
|
|
|
// MOVOA: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOA xmm xmm
|
|
// MOVOA m128 xmm
|
|
// MOVOA xmm m128
|
|
// Construct and append a MOVOA instruction to the active function.
|
|
func (c *Context) MOVOA(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVOA(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVOA: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOA xmm xmm
|
|
// MOVOA m128 xmm
|
|
// MOVOA xmm m128
|
|
// Construct and append a MOVOA instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVOA(mx, mx1 operand.Op) { ctx.MOVOA(mx, mx1) }
|
|
|
|
// MOVOU: Move Unaligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOU xmm xmm
|
|
// MOVOU m128 xmm
|
|
// MOVOU xmm m128
|
|
// Construct and append a MOVOU instruction to the active function.
|
|
func (c *Context) MOVOU(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVOU(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}

// MOVOU: Move Unaligned Double Quadword.
//
// Forms:
//
// MOVOU xmm xmm
// MOVOU m128 xmm
// MOVOU xmm m128
// Construct and append a MOVOU instruction to the active function.
// Operates on the global context.
func MOVOU(mx, mx1 operand.Op) { ctx.MOVOU(mx, mx1) }
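
// A minimal usage sketch, not generated content: MOVOU tolerates unaligned
// addresses, making it the usual choice for moving 16-byte blocks through an
// XMM register when alignment is unknown. Names here are illustrative.
//
//    tmp := XMM()
//    MOVOU(Mem{Base: src}, tmp)
//    MOVOU(tmp, Mem{Base: dst})
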
// MOVQ: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVQ imm32 r64
|
|
// MOVQ imm64 r64
|
|
// MOVQ r64 r64
|
|
// MOVQ m64 r64
|
|
// MOVQ imm32 m64
|
|
// MOVQ r64 m64
|
|
// MOVQ xmm r64
|
|
// MOVQ r64 xmm
|
|
// MOVQ xmm xmm
|
|
// MOVQ m64 xmm
|
|
// MOVQ xmm m64
|
|
// MOVQ xmm r32
|
|
// MOVQ r32 xmm
|
|
// MOVQ m32 xmm
|
|
// MOVQ xmm m32
|
|
// Construct and append a MOVQ instruction to the active function.
|
|
func (c *Context) MOVQ(imrx, mrx operand.Op) {
|
|
if inst, err := x86.MOVQ(imrx, mrx); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}

// MOVQ: Move.
//
// Forms:
//
// MOVQ imm32 r64
// MOVQ imm64 r64
// MOVQ r64 r64
// MOVQ m64 r64
// MOVQ imm32 m64
// MOVQ r64 m64
// MOVQ xmm r64
// MOVQ r64 xmm
// MOVQ xmm xmm
// MOVQ m64 xmm
// MOVQ xmm m64
// MOVQ xmm r32
// MOVQ r32 xmm
// MOVQ m32 xmm
// MOVQ xmm m32
// Construct and append a MOVQ instruction to the active function.
// Operates on the global context.
func MOVQ(imrx, mrx operand.Op) { ctx.MOVQ(imrx, mrx) }
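
// A minimal usage sketch, not generated content: the single MOVQ wrapper covers
// immediate loads, register-to-register moves, and GP<->XMM transfers; the form
// is chosen from the operand types. Names here are illustrative.
//
//    x := GP64()
//    v := XMM()
//    MOVQ(Imm(0x0102030405060708), x)
//    MOVQ(x, v)  // general-purpose register into the low 64 bits of an XMM register
//    MOVQ(v, x)  // and back
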
// MOVSD: Move Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSD xmm xmm
|
|
// MOVSD m64 xmm
|
|
// MOVSD xmm m64
|
|
// Construct and append a MOVSD instruction to the active function.
|
|
func (c *Context) MOVSD(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVSD(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVSD: Move Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSD xmm xmm
|
|
// MOVSD m64 xmm
|
|
// MOVSD xmm m64
|
|
// Construct and append a MOVSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSD(mx, mx1 operand.Op) { ctx.MOVSD(mx, mx1) }
|
|
|
|
// MOVSHDUP: Move Packed Single-FP High and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSHDUP xmm xmm
|
|
// MOVSHDUP m128 xmm
|
|
// Construct and append a MOVSHDUP instruction to the active function.
|
|
func (c *Context) MOVSHDUP(mx, x operand.Op) {
|
|
if inst, err := x86.MOVSHDUP(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVSHDUP: Move Packed Single-FP High and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSHDUP xmm xmm
|
|
// MOVSHDUP m128 xmm
|
|
// Construct and append a MOVSHDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSHDUP(mx, x operand.Op) { ctx.MOVSHDUP(mx, x) }
|
|
|
|
// MOVSLDUP: Move Packed Single-FP Low and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSLDUP xmm xmm
|
|
// MOVSLDUP m128 xmm
|
|
// Construct and append a MOVSLDUP instruction to the active function.
|
|
func (c *Context) MOVSLDUP(mx, x operand.Op) {
|
|
if inst, err := x86.MOVSLDUP(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVSLDUP: Move Packed Single-FP Low and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSLDUP xmm xmm
|
|
// MOVSLDUP m128 xmm
|
|
// Construct and append a MOVSLDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSLDUP(mx, x operand.Op) { ctx.MOVSLDUP(mx, x) }
|
|
|
|
// MOVSS: Move Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSS xmm xmm
|
|
// MOVSS m32 xmm
|
|
// MOVSS xmm m32
|
|
// Construct and append a MOVSS instruction to the active function.
|
|
func (c *Context) MOVSS(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVSS(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVSS: Move Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSS xmm xmm
|
|
// MOVSS m32 xmm
|
|
// MOVSS xmm m32
|
|
// Construct and append a MOVSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSS(mx, mx1 operand.Op) { ctx.MOVSS(mx, mx1) }
|
|
|
|
// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPD xmm xmm
|
|
// MOVUPD m128 xmm
|
|
// MOVUPD xmm m128
|
|
// Construct and append a MOVUPD instruction to the active function.
|
|
func (c *Context) MOVUPD(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVUPD(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPD xmm xmm
|
|
// MOVUPD m128 xmm
|
|
// MOVUPD xmm m128
|
|
// Construct and append a MOVUPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVUPD(mx, mx1 operand.Op) { ctx.MOVUPD(mx, mx1) }
|
|
|
|
// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPS xmm xmm
|
|
// MOVUPS m128 xmm
|
|
// MOVUPS xmm m128
|
|
// Construct and append a MOVUPS instruction to the active function.
|
|
func (c *Context) MOVUPS(mx, mx1 operand.Op) {
|
|
if inst, err := x86.MOVUPS(mx, mx1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPS xmm xmm
|
|
// MOVUPS m128 xmm
|
|
// MOVUPS xmm m128
|
|
// Construct and append a MOVUPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVUPS(mx, mx1 operand.Op) { ctx.MOVUPS(mx, mx1) }
|
|
|
|
// MOVW: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVW imm16 r16
|
|
// MOVW r16 r16
|
|
// MOVW m16 r16
|
|
// MOVW imm16 m16
|
|
// MOVW r16 m16
|
|
// Construct and append a MOVW instruction to the active function.
|
|
func (c *Context) MOVW(imr, mr operand.Op) {
|
|
if inst, err := x86.MOVW(imr, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVW: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVW imm16 r16
|
|
// MOVW r16 r16
|
|
// MOVW m16 r16
|
|
// MOVW imm16 m16
|
|
// MOVW r16 m16
|
|
// Construct and append a MOVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVW(imr, mr operand.Op) { ctx.MOVW(imr, mr) }
|
|
|
|
// MOVWLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLSX r16 r32
|
|
// MOVWLSX m16 r32
|
|
// Construct and append a MOVWLSX instruction to the active function.
|
|
func (c *Context) MOVWLSX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVWLSX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVWLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLSX r16 r32
|
|
// MOVWLSX m16 r32
|
|
// Construct and append a MOVWLSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWLSX(mr, r operand.Op) { ctx.MOVWLSX(mr, r) }
|
|
|
|
// MOVWLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLZX r16 r32
|
|
// MOVWLZX m16 r32
|
|
// Construct and append a MOVWLZX instruction to the active function.
|
|
func (c *Context) MOVWLZX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVWLZX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVWLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLZX r16 r32
|
|
// MOVWLZX m16 r32
|
|
// Construct and append a MOVWLZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWLZX(mr, r operand.Op) { ctx.MOVWLZX(mr, r) }
|
|
|
|
// MOVWQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQSX r16 r64
|
|
// MOVWQSX m16 r64
|
|
// Construct and append a MOVWQSX instruction to the active function.
|
|
func (c *Context) MOVWQSX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVWQSX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVWQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQSX r16 r64
|
|
// MOVWQSX m16 r64
|
|
// Construct and append a MOVWQSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWQSX(mr, r operand.Op) { ctx.MOVWQSX(mr, r) }
|
|
|
|
// MOVWQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQZX r16 r64
|
|
// MOVWQZX m16 r64
|
|
// Construct and append a MOVWQZX instruction to the active function.
|
|
func (c *Context) MOVWQZX(mr, r operand.Op) {
|
|
if inst, err := x86.MOVWQZX(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MOVWQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQZX r16 r64
|
|
// MOVWQZX m16 r64
|
|
// Construct and append a MOVWQZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWQZX(mr, r operand.Op) { ctx.MOVWQZX(mr, r) }
|
|
|
|
// MPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MPSADBW imm8 xmm xmm
|
|
// MPSADBW imm8 m128 xmm
|
|
// Construct and append a MPSADBW instruction to the active function.
|
|
func (c *Context) MPSADBW(i, mx, x operand.Op) {
|
|
if inst, err := x86.MPSADBW(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MPSADBW imm8 xmm xmm
|
|
// MPSADBW imm8 m128 xmm
|
|
// Construct and append a MPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MPSADBW(i, mx, x operand.Op) { ctx.MPSADBW(i, mx, x) }
|
|
|
|
// MULB: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULB r8
|
|
// MULB m8
|
|
// Construct and append a MULB instruction to the active function.
|
|
func (c *Context) MULB(mr operand.Op) {
|
|
if inst, err := x86.MULB(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULB: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULB r8
|
|
// MULB m8
|
|
// Construct and append a MULB instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULB(mr operand.Op) { ctx.MULB(mr) }
|
|
|
|
// MULL: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULL r32
|
|
// MULL m32
|
|
// Construct and append a MULL instruction to the active function.
|
|
func (c *Context) MULL(mr operand.Op) {
|
|
if inst, err := x86.MULL(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULL: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULL r32
|
|
// MULL m32
|
|
// Construct and append a MULL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULL(mr operand.Op) { ctx.MULL(mr) }
|
|
|
|
// MULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPD xmm xmm
|
|
// MULPD m128 xmm
|
|
// Construct and append a MULPD instruction to the active function.
|
|
func (c *Context) MULPD(mx, x operand.Op) {
|
|
if inst, err := x86.MULPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPD xmm xmm
|
|
// MULPD m128 xmm
|
|
// Construct and append a MULPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULPD(mx, x operand.Op) { ctx.MULPD(mx, x) }
|
|
|
|
// MULPS: Multiply Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPS xmm xmm
|
|
// MULPS m128 xmm
|
|
// Construct and append a MULPS instruction to the active function.
|
|
func (c *Context) MULPS(mx, x operand.Op) {
|
|
if inst, err := x86.MULPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULPS: Multiply Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPS xmm xmm
|
|
// MULPS m128 xmm
|
|
// Construct and append a MULPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULPS(mx, x operand.Op) { ctx.MULPS(mx, x) }
|
|
|
|
// MULQ: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULQ r64
|
|
// MULQ m64
|
|
// Construct and append a MULQ instruction to the active function.
|
|
func (c *Context) MULQ(mr operand.Op) {
|
|
if inst, err := x86.MULQ(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}

// MULQ: Unsigned Multiply.
//
// Forms:
//
// MULQ r64
// MULQ m64
// Construct and append a MULQ instruction to the active function.
// Operates on the global context.
func MULQ(mr operand.Op) { ctx.MULQ(mr) }
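
// A minimal usage sketch, not generated content: MULQ takes a single operand
// because the other factor and the 128-bit product use fixed registers
// (RDX:RAX = RAX * src), so a generator pins RAX via the reg package. Names
// here are illustrative.
//
//    x := Load(Param("x"), reg.RAX)
//    y := Load(Param("y"), GP64())
//    MULQ(y)  // high half in RDX, low half in RAX
//    Store(reg.RAX, ReturnIndex(0))
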
// MULSD: Multiply Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSD xmm xmm
|
|
// MULSD m64 xmm
|
|
// Construct and append a MULSD instruction to the active function.
|
|
func (c *Context) MULSD(mx, x operand.Op) {
|
|
if inst, err := x86.MULSD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULSD: Multiply Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSD xmm xmm
|
|
// MULSD m64 xmm
|
|
// Construct and append a MULSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULSD(mx, x operand.Op) { ctx.MULSD(mx, x) }
|
|
|
|
// MULSS: Multiply Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSS xmm xmm
|
|
// MULSS m32 xmm
|
|
// Construct and append a MULSS instruction to the active function.
|
|
func (c *Context) MULSS(mx, x operand.Op) {
|
|
if inst, err := x86.MULSS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULSS: Multiply Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSS xmm xmm
|
|
// MULSS m32 xmm
|
|
// Construct and append a MULSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULSS(mx, x operand.Op) { ctx.MULSS(mx, x) }
|
|
|
|
// MULW: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULW r16
|
|
// MULW m16
|
|
// Construct and append a MULW instruction to the active function.
|
|
func (c *Context) MULW(mr operand.Op) {
|
|
if inst, err := x86.MULW(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULW: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULW r16
|
|
// MULW m16
|
|
// Construct and append a MULW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULW(mr operand.Op) { ctx.MULW(mr) }
|
|
|
|
// MULXL: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXL r32 r32 r32
|
|
// MULXL m32 r32 r32
|
|
// Construct and append a MULXL instruction to the active function.
|
|
func (c *Context) MULXL(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.MULXL(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULXL: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXL r32 r32 r32
|
|
// MULXL m32 r32 r32
|
|
// Construct and append a MULXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULXL(mr, r, r1 operand.Op) { ctx.MULXL(mr, r, r1) }
|
|
|
|
// MULXQ: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXQ r64 r64 r64
|
|
// MULXQ m64 r64 r64
|
|
// Construct and append a MULXQ instruction to the active function.
|
|
func (c *Context) MULXQ(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.MULXQ(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MULXQ: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXQ r64 r64 r64
|
|
// MULXQ m64 r64 r64
|
|
// Construct and append a MULXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULXQ(mr, r, r1 operand.Op) { ctx.MULXQ(mr, r, r1) }
|
|
|
|
// MWAIT: Monitor Wait.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MWAIT
|
|
// Construct and append a MWAIT instruction to the active function.
|
|
func (c *Context) MWAIT() {
|
|
if inst, err := x86.MWAIT(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// MWAIT: Monitor Wait.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MWAIT
|
|
// Construct and append a MWAIT instruction to the active function.
|
|
// Operates on the global context.
|
|
func MWAIT() { ctx.MWAIT() }
|
|
|
|
// NEGB: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGB r8
|
|
// NEGB m8
|
|
// Construct and append a NEGB instruction to the active function.
|
|
func (c *Context) NEGB(mr operand.Op) {
|
|
if inst, err := x86.NEGB(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// NEGB: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGB r8
|
|
// NEGB m8
|
|
// Construct and append a NEGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGB(mr operand.Op) { ctx.NEGB(mr) }
|
|
|
|
// NEGL: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGL r32
|
|
// NEGL m32
|
|
// Construct and append a NEGL instruction to the active function.
|
|
func (c *Context) NEGL(mr operand.Op) {
|
|
if inst, err := x86.NEGL(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// NEGL: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGL r32
|
|
// NEGL m32
|
|
// Construct and append a NEGL instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGL(mr operand.Op) { ctx.NEGL(mr) }
|
|
|
|
// NEGQ: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGQ r64
|
|
// NEGQ m64
|
|
// Construct and append a NEGQ instruction to the active function.
|
|
func (c *Context) NEGQ(mr operand.Op) {
|
|
if inst, err := x86.NEGQ(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// NEGQ: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGQ r64
|
|
// NEGQ m64
|
|
// Construct and append a NEGQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGQ(mr operand.Op) { ctx.NEGQ(mr) }
|
|
|
|
// NEGW: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGW r16
|
|
// NEGW m16
|
|
// Construct and append a NEGW instruction to the active function.
|
|
func (c *Context) NEGW(mr operand.Op) {
|
|
if inst, err := x86.NEGW(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// NEGW: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGW r16
|
|
// NEGW m16
|
|
// Construct and append a NEGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGW(mr operand.Op) { ctx.NEGW(mr) }
|
|
|
|
// NOP: No Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOP
|
|
// Construct and append a NOP instruction to the active function.
|
|
func (c *Context) NOP() {
|
|
if inst, err := x86.NOP(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// NOP: No Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOP
|
|
// Construct and append a NOP instruction to the active function.
|
|
// Operates on the global context.
|
|
func NOP() { ctx.NOP() }

// NOTB: One's Complement Negation.
//
// Forms:
//
// NOTB r8
// NOTB m8
// Construct and append a NOTB instruction to the active function.
func (c *Context) NOTB(mr operand.Op) {
	if inst, err := x86.NOTB(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// NOTB: One's Complement Negation.
//
// Forms:
//
// NOTB r8
// NOTB m8
// Construct and append a NOTB instruction to the active function.
// Operates on the global context.
func NOTB(mr operand.Op) { ctx.NOTB(mr) }

// NOTL: One's Complement Negation.
//
// Forms:
//
// NOTL r32
// NOTL m32
// Construct and append a NOTL instruction to the active function.
func (c *Context) NOTL(mr operand.Op) {
	if inst, err := x86.NOTL(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// NOTL: One's Complement Negation.
//
// Forms:
//
// NOTL r32
// NOTL m32
// Construct and append a NOTL instruction to the active function.
// Operates on the global context.
func NOTL(mr operand.Op) { ctx.NOTL(mr) }

// NOTQ: One's Complement Negation.
//
// Forms:
//
// NOTQ r64
// NOTQ m64
// Construct and append a NOTQ instruction to the active function.
func (c *Context) NOTQ(mr operand.Op) {
	if inst, err := x86.NOTQ(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// NOTQ: One's Complement Negation.
//
// Forms:
//
// NOTQ r64
// NOTQ m64
// Construct and append a NOTQ instruction to the active function.
// Operates on the global context.
func NOTQ(mr operand.Op) { ctx.NOTQ(mr) }

// NOTW: One's Complement Negation.
//
// Forms:
//
// NOTW r16
// NOTW m16
// Construct and append a NOTW instruction to the active function.
func (c *Context) NOTW(mr operand.Op) {
	if inst, err := x86.NOTW(mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// NOTW: One's Complement Negation.
//
// Forms:
//
// NOTW r16
// NOTW m16
// Construct and append a NOTW instruction to the active function.
// Operates on the global context.
func NOTW(mr operand.Op) { ctx.NOTW(mr) }

// ORB: Logical Inclusive OR.
//
// Forms:
//
// ORB imm8 al
// ORB imm8 r8
// ORB r8 r8
// ORB m8 r8
// ORB imm8 m8
// ORB r8 m8
// Construct and append a ORB instruction to the active function.
func (c *Context) ORB(imr, amr operand.Op) {
	if inst, err := x86.ORB(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ORB: Logical Inclusive OR.
//
// Forms:
//
// ORB imm8 al
// ORB imm8 r8
// ORB r8 r8
// ORB m8 r8
// ORB imm8 m8
// ORB r8 m8
// Construct and append a ORB instruction to the active function.
// Operates on the global context.
func ORB(imr, amr operand.Op) { ctx.ORB(imr, amr) }

// ORL: Logical Inclusive OR.
//
// Forms:
//
// ORL imm32 eax
// ORL imm8 r32
// ORL imm32 r32
// ORL r32 r32
// ORL m32 r32
// ORL imm8 m32
// ORL imm32 m32
// ORL r32 m32
// Construct and append a ORL instruction to the active function.
func (c *Context) ORL(imr, emr operand.Op) {
	if inst, err := x86.ORL(imr, emr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ORL: Logical Inclusive OR.
//
// Forms:
//
// ORL imm32 eax
// ORL imm8 r32
// ORL imm32 r32
// ORL r32 r32
// ORL m32 r32
// ORL imm8 m32
// ORL imm32 m32
// ORL r32 m32
// Construct and append a ORL instruction to the active function.
// Operates on the global context.
func ORL(imr, emr operand.Op) { ctx.ORL(imr, emr) }

// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
//
// Forms:
//
// ORPD xmm xmm
// ORPD m128 xmm
// Construct and append a ORPD instruction to the active function.
func (c *Context) ORPD(mx, x operand.Op) {
	if inst, err := x86.ORPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
//
// Forms:
//
// ORPD xmm xmm
// ORPD m128 xmm
// Construct and append a ORPD instruction to the active function.
// Operates on the global context.
func ORPD(mx, x operand.Op) { ctx.ORPD(mx, x) }

// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
//
// Forms:
//
// ORPS xmm xmm
// ORPS m128 xmm
// Construct and append a ORPS instruction to the active function.
func (c *Context) ORPS(mx, x operand.Op) {
	if inst, err := x86.ORPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
//
// Forms:
//
// ORPS xmm xmm
// ORPS m128 xmm
// Construct and append a ORPS instruction to the active function.
// Operates on the global context.
func ORPS(mx, x operand.Op) { ctx.ORPS(mx, x) }

// ORQ: Logical Inclusive OR.
//
// Forms:
//
// ORQ imm32 rax
// ORQ imm8 r64
// ORQ imm32 r64
// ORQ r64 r64
// ORQ m64 r64
// ORQ imm8 m64
// ORQ imm32 m64
// ORQ r64 m64
// Construct and append a ORQ instruction to the active function.
func (c *Context) ORQ(imr, mr operand.Op) {
	if inst, err := x86.ORQ(imr, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ORQ: Logical Inclusive OR.
//
// Forms:
//
// ORQ imm32 rax
// ORQ imm8 r64
// ORQ imm32 r64
// ORQ r64 r64
// ORQ m64 r64
// ORQ imm8 m64
// ORQ imm32 m64
// ORQ r64 m64
// Construct and append a ORQ instruction to the active function.
// Operates on the global context.
func ORQ(imr, mr operand.Op) { ctx.ORQ(imr, mr) }
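
// Illustrative usage sketch (not generated output): setting a flag bit in a
// 64-bit register with ORQ, assuming the GP64 helper and the MOVQ constructor
// from this package:
//
//	flags := GP64()
//	MOVQ(operand.Imm(0), flags)
//	ORQ(operand.Imm(0x4), flags) // set bit 2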

// ORW: Logical Inclusive OR.
//
// Forms:
//
// ORW imm16 ax
// ORW imm8 r16
// ORW imm16 r16
// ORW r16 r16
// ORW m16 r16
// ORW imm8 m16
// ORW imm16 m16
// ORW r16 m16
// Construct and append a ORW instruction to the active function.
func (c *Context) ORW(imr, amr operand.Op) {
	if inst, err := x86.ORW(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ORW: Logical Inclusive OR.
//
// Forms:
//
// ORW imm16 ax
// ORW imm8 r16
// ORW imm16 r16
// ORW r16 r16
// ORW m16 r16
// ORW imm8 m16
// ORW imm16 m16
// ORW r16 m16
// Construct and append a ORW instruction to the active function.
// Operates on the global context.
func ORW(imr, amr operand.Op) { ctx.ORW(imr, amr) }

// PABSB: Packed Absolute Value of Byte Integers.
//
// Forms:
//
// PABSB xmm xmm
// PABSB m128 xmm
// Construct and append a PABSB instruction to the active function.
func (c *Context) PABSB(mx, x operand.Op) {
	if inst, err := x86.PABSB(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PABSB: Packed Absolute Value of Byte Integers.
//
// Forms:
//
// PABSB xmm xmm
// PABSB m128 xmm
// Construct and append a PABSB instruction to the active function.
// Operates on the global context.
func PABSB(mx, x operand.Op) { ctx.PABSB(mx, x) }

// PABSD: Packed Absolute Value of Doubleword Integers.
//
// Forms:
//
// PABSD xmm xmm
// PABSD m128 xmm
// Construct and append a PABSD instruction to the active function.
func (c *Context) PABSD(mx, x operand.Op) {
	if inst, err := x86.PABSD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PABSD: Packed Absolute Value of Doubleword Integers.
//
// Forms:
//
// PABSD xmm xmm
// PABSD m128 xmm
// Construct and append a PABSD instruction to the active function.
// Operates on the global context.
func PABSD(mx, x operand.Op) { ctx.PABSD(mx, x) }

// PABSW: Packed Absolute Value of Word Integers.
//
// Forms:
//
// PABSW xmm xmm
// PABSW m128 xmm
// Construct and append a PABSW instruction to the active function.
func (c *Context) PABSW(mx, x operand.Op) {
	if inst, err := x86.PABSW(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PABSW: Packed Absolute Value of Word Integers.
//
// Forms:
//
// PABSW xmm xmm
// PABSW m128 xmm
// Construct and append a PABSW instruction to the active function.
// Operates on the global context.
func PABSW(mx, x operand.Op) { ctx.PABSW(mx, x) }

// PACKSSLW: Pack Doublewords into Words with Signed Saturation.
//
// Forms:
//
// PACKSSLW xmm xmm
// PACKSSLW m128 xmm
// Construct and append a PACKSSLW instruction to the active function.
func (c *Context) PACKSSLW(mx, x operand.Op) {
	if inst, err := x86.PACKSSLW(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PACKSSLW: Pack Doublewords into Words with Signed Saturation.
//
// Forms:
//
// PACKSSLW xmm xmm
// PACKSSLW m128 xmm
// Construct and append a PACKSSLW instruction to the active function.
// Operates on the global context.
func PACKSSLW(mx, x operand.Op) { ctx.PACKSSLW(mx, x) }

// PACKSSWB: Pack Words into Bytes with Signed Saturation.
//
// Forms:
//
// PACKSSWB xmm xmm
// PACKSSWB m128 xmm
// Construct and append a PACKSSWB instruction to the active function.
func (c *Context) PACKSSWB(mx, x operand.Op) {
	if inst, err := x86.PACKSSWB(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PACKSSWB: Pack Words into Bytes with Signed Saturation.
//
// Forms:
//
// PACKSSWB xmm xmm
// PACKSSWB m128 xmm
// Construct and append a PACKSSWB instruction to the active function.
// Operates on the global context.
func PACKSSWB(mx, x operand.Op) { ctx.PACKSSWB(mx, x) }

// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
//
// Forms:
//
// PACKUSDW xmm xmm
// PACKUSDW m128 xmm
// Construct and append a PACKUSDW instruction to the active function.
func (c *Context) PACKUSDW(mx, x operand.Op) {
	if inst, err := x86.PACKUSDW(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
//
// Forms:
//
// PACKUSDW xmm xmm
// PACKUSDW m128 xmm
// Construct and append a PACKUSDW instruction to the active function.
// Operates on the global context.
func PACKUSDW(mx, x operand.Op) { ctx.PACKUSDW(mx, x) }

// PACKUSWB: Pack Words into Bytes with Unsigned Saturation.
//
// Forms:
//
// PACKUSWB xmm xmm
// PACKUSWB m128 xmm
// Construct and append a PACKUSWB instruction to the active function.
func (c *Context) PACKUSWB(mx, x operand.Op) {
	if inst, err := x86.PACKUSWB(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PACKUSWB: Pack Words into Bytes with Unsigned Saturation.
//
// Forms:
//
// PACKUSWB xmm xmm
// PACKUSWB m128 xmm
// Construct and append a PACKUSWB instruction to the active function.
// Operates on the global context.
func PACKUSWB(mx, x operand.Op) { ctx.PACKUSWB(mx, x) }

// PADDB: Add Packed Byte Integers.
//
// Forms:
//
// PADDB xmm xmm
// PADDB m128 xmm
// Construct and append a PADDB instruction to the active function.
func (c *Context) PADDB(mx, x operand.Op) {
	if inst, err := x86.PADDB(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// PADDB: Add Packed Byte Integers.
//
// Forms:
//
// PADDB xmm xmm
// PADDB m128 xmm
// Construct and append a PADDB instruction to the active function.
// Operates on the global context.
func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) }
|
|
|
|
// PADDD: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDD xmm xmm
|
|
// PADDD m128 xmm
|
|
// Construct and append a PADDD instruction to the active function.
|
|
func (c *Context) PADDD(mx, x operand.Op) {
|
|
if inst, err := x86.PADDD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDD: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDD xmm xmm
|
|
// PADDD m128 xmm
|
|
// Construct and append a PADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDD(mx, x operand.Op) { ctx.PADDD(mx, x) }
|
|
|
|
// PADDL: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDL xmm xmm
|
|
// PADDL m128 xmm
|
|
// Construct and append a PADDL instruction to the active function.
|
|
func (c *Context) PADDL(mx, x operand.Op) {
|
|
if inst, err := x86.PADDL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDL: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDL xmm xmm
|
|
// PADDL m128 xmm
|
|
// Construct and append a PADDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDL(mx, x operand.Op) { ctx.PADDL(mx, x) }
|
|
|
|
// PADDQ: Add Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDQ xmm xmm
|
|
// PADDQ m128 xmm
|
|
// Construct and append a PADDQ instruction to the active function.
|
|
func (c *Context) PADDQ(mx, x operand.Op) {
|
|
if inst, err := x86.PADDQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDQ: Add Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDQ xmm xmm
|
|
// PADDQ m128 xmm
|
|
// Construct and append a PADDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDQ(mx, x operand.Op) { ctx.PADDQ(mx, x) }
|
|
|
|
// PADDSB: Add Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSB xmm xmm
|
|
// PADDSB m128 xmm
|
|
// Construct and append a PADDSB instruction to the active function.
|
|
func (c *Context) PADDSB(mx, x operand.Op) {
|
|
if inst, err := x86.PADDSB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDSB: Add Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSB xmm xmm
|
|
// PADDSB m128 xmm
|
|
// Construct and append a PADDSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDSB(mx, x operand.Op) { ctx.PADDSB(mx, x) }
|
|
|
|
// PADDSW: Add Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSW xmm xmm
|
|
// PADDSW m128 xmm
|
|
// Construct and append a PADDSW instruction to the active function.
|
|
func (c *Context) PADDSW(mx, x operand.Op) {
|
|
if inst, err := x86.PADDSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDSW: Add Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSW xmm xmm
|
|
// PADDSW m128 xmm
|
|
// Construct and append a PADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDSW(mx, x operand.Op) { ctx.PADDSW(mx, x) }
|
|
|
|
// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSB xmm xmm
|
|
// PADDUSB m128 xmm
|
|
// Construct and append a PADDUSB instruction to the active function.
|
|
func (c *Context) PADDUSB(mx, x operand.Op) {
|
|
if inst, err := x86.PADDUSB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSB xmm xmm
|
|
// PADDUSB m128 xmm
|
|
// Construct and append a PADDUSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDUSB(mx, x operand.Op) { ctx.PADDUSB(mx, x) }
|
|
|
|
// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSW xmm xmm
|
|
// PADDUSW m128 xmm
|
|
// Construct and append a PADDUSW instruction to the active function.
|
|
func (c *Context) PADDUSW(mx, x operand.Op) {
|
|
if inst, err := x86.PADDUSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSW xmm xmm
|
|
// PADDUSW m128 xmm
|
|
// Construct and append a PADDUSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDUSW(mx, x operand.Op) { ctx.PADDUSW(mx, x) }
|
|
|
|
// PADDW: Add Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDW xmm xmm
|
|
// PADDW m128 xmm
|
|
// Construct and append a PADDW instruction to the active function.
|
|
func (c *Context) PADDW(mx, x operand.Op) {
|
|
if inst, err := x86.PADDW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PADDW: Add Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDW xmm xmm
|
|
// PADDW m128 xmm
|
|
// Construct and append a PADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDW(mx, x operand.Op) { ctx.PADDW(mx, x) }
|
|
|
|
// PALIGNR: Packed Align Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PALIGNR imm8 xmm xmm
|
|
// PALIGNR imm8 m128 xmm
|
|
// Construct and append a PALIGNR instruction to the active function.
|
|
func (c *Context) PALIGNR(i, mx, x operand.Op) {
|
|
if inst, err := x86.PALIGNR(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PALIGNR: Packed Align Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PALIGNR imm8 xmm xmm
|
|
// PALIGNR imm8 m128 xmm
|
|
// Construct and append a PALIGNR instruction to the active function.
|
|
// Operates on the global context.
|
|
func PALIGNR(i, mx, x operand.Op) { ctx.PALIGNR(i, mx, x) }
|
|
|
|
// PAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAND xmm xmm
|
|
// PAND m128 xmm
|
|
// Construct and append a PAND instruction to the active function.
|
|
func (c *Context) PAND(mx, x operand.Op) {
|
|
if inst, err := x86.PAND(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAND xmm xmm
|
|
// PAND m128 xmm
|
|
// Construct and append a PAND instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAND(mx, x operand.Op) { ctx.PAND(mx, x) }
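
// Illustrative usage sketch (not generated output): a lane-wise mask with
// PAND, assuming the XMM register helper from this package:
//
//	mask, data := XMM(), XMM()
//	// ... load mask and data ...
//	PAND(mask, data) // data &= mask across all 128 bits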
|
|
|
|
// PANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PANDN xmm xmm
|
|
// PANDN m128 xmm
|
|
// Construct and append a PANDN instruction to the active function.
|
|
func (c *Context) PANDN(mx, x operand.Op) {
|
|
if inst, err := x86.PANDN(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PANDN xmm xmm
|
|
// PANDN m128 xmm
|
|
// Construct and append a PANDN instruction to the active function.
|
|
// Operates on the global context.
|
|
func PANDN(mx, x operand.Op) { ctx.PANDN(mx, x) }
|
|
|
|
// PAUSE: Spin Loop Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAUSE
|
|
// Construct and append a PAUSE instruction to the active function.
|
|
func (c *Context) PAUSE() {
|
|
if inst, err := x86.PAUSE(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PAUSE: Spin Loop Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAUSE
|
|
// Construct and append a PAUSE instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAUSE() { ctx.PAUSE() }
|
|
|
|
// PAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGB xmm xmm
|
|
// PAVGB m128 xmm
|
|
// Construct and append a PAVGB instruction to the active function.
|
|
func (c *Context) PAVGB(mx, x operand.Op) {
|
|
if inst, err := x86.PAVGB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGB xmm xmm
|
|
// PAVGB m128 xmm
|
|
// Construct and append a PAVGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAVGB(mx, x operand.Op) { ctx.PAVGB(mx, x) }
|
|
|
|
// PAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGW xmm xmm
|
|
// PAVGW m128 xmm
|
|
// Construct and append a PAVGW instruction to the active function.
|
|
func (c *Context) PAVGW(mx, x operand.Op) {
|
|
if inst, err := x86.PAVGW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGW xmm xmm
|
|
// PAVGW m128 xmm
|
|
// Construct and append a PAVGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAVGW(mx, x operand.Op) { ctx.PAVGW(mx, x) }
|
|
|
|
// PBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDVB xmm0 xmm xmm
|
|
// PBLENDVB xmm0 m128 xmm
|
|
// Construct and append a PBLENDVB instruction to the active function.
|
|
func (c *Context) PBLENDVB(x, mx, x1 operand.Op) {
|
|
if inst, err := x86.PBLENDVB(x, mx, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDVB xmm0 xmm xmm
|
|
// PBLENDVB xmm0 m128 xmm
|
|
// Construct and append a PBLENDVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PBLENDVB(x, mx, x1 operand.Op) { ctx.PBLENDVB(x, mx, x1) }
|
|
|
|
// PBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDW imm8 xmm xmm
|
|
// PBLENDW imm8 m128 xmm
|
|
// Construct and append a PBLENDW instruction to the active function.
|
|
func (c *Context) PBLENDW(i, mx, x operand.Op) {
|
|
if inst, err := x86.PBLENDW(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDW imm8 xmm xmm
|
|
// PBLENDW imm8 m128 xmm
|
|
// Construct and append a PBLENDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PBLENDW(i, mx, x operand.Op) { ctx.PBLENDW(i, mx, x) }
|
|
|
|
// PCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCLMULQDQ imm8 xmm xmm
|
|
// PCLMULQDQ imm8 m128 xmm
|
|
// Construct and append a PCLMULQDQ instruction to the active function.
|
|
func (c *Context) PCLMULQDQ(i, mx, x operand.Op) {
|
|
if inst, err := x86.PCLMULQDQ(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCLMULQDQ imm8 xmm xmm
|
|
// PCLMULQDQ imm8 m128 xmm
|
|
// Construct and append a PCLMULQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCLMULQDQ(i, mx, x operand.Op) { ctx.PCLMULQDQ(i, mx, x) }
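
// Illustrative usage sketch (not generated output): the imm8 operand of
// PCLMULQDQ selects which 64-bit halves are multiplied; 0x00 multiplies the
// low quadwords of both operands. Assumes the XMM helper from this package:
//
//	a, b := XMM(), XMM()
//	// ... load a and b ...
//	PCLMULQDQ(operand.Imm(0x00), a, b) // b = carry-less product of the low halves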
|
|
|
|
// PCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQB xmm xmm
|
|
// PCMPEQB m128 xmm
|
|
// Construct and append a PCMPEQB instruction to the active function.
|
|
func (c *Context) PCMPEQB(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPEQB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQB xmm xmm
|
|
// PCMPEQB m128 xmm
|
|
// Construct and append a PCMPEQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQB(mx, x operand.Op) { ctx.PCMPEQB(mx, x) }
|
|
|
|
// PCMPEQL: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQL xmm xmm
|
|
// PCMPEQL m128 xmm
|
|
// Construct and append a PCMPEQL instruction to the active function.
|
|
func (c *Context) PCMPEQL(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPEQL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPEQL: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQL xmm xmm
|
|
// PCMPEQL m128 xmm
|
|
// Construct and append a PCMPEQL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQL(mx, x operand.Op) { ctx.PCMPEQL(mx, x) }
|
|
|
|
// PCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQQ xmm xmm
|
|
// PCMPEQQ m128 xmm
|
|
// Construct and append a PCMPEQQ instruction to the active function.
|
|
func (c *Context) PCMPEQQ(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPEQQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQQ xmm xmm
|
|
// PCMPEQQ m128 xmm
|
|
// Construct and append a PCMPEQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQQ(mx, x operand.Op) { ctx.PCMPEQQ(mx, x) }
|
|
|
|
// PCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQW xmm xmm
|
|
// PCMPEQW m128 xmm
|
|
// Construct and append a PCMPEQW instruction to the active function.
|
|
func (c *Context) PCMPEQW(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPEQW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQW xmm xmm
|
|
// PCMPEQW m128 xmm
|
|
// Construct and append a PCMPEQW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQW(mx, x operand.Op) { ctx.PCMPEQW(mx, x) }
|
|
|
|
// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRI imm8 xmm xmm
|
|
// PCMPESTRI imm8 m128 xmm
|
|
// Construct and append a PCMPESTRI instruction to the active function.
|
|
func (c *Context) PCMPESTRI(i, mx, x operand.Op) {
|
|
if inst, err := x86.PCMPESTRI(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRI imm8 xmm xmm
|
|
// PCMPESTRI imm8 m128 xmm
|
|
// Construct and append a PCMPESTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPESTRI(i, mx, x operand.Op) { ctx.PCMPESTRI(i, mx, x) }
|
|
|
|
// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRM imm8 xmm xmm
|
|
// PCMPESTRM imm8 m128 xmm
|
|
// Construct and append a PCMPESTRM instruction to the active function.
|
|
func (c *Context) PCMPESTRM(i, mx, x operand.Op) {
|
|
if inst, err := x86.PCMPESTRM(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRM imm8 xmm xmm
|
|
// PCMPESTRM imm8 m128 xmm
|
|
// Construct and append a PCMPESTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPESTRM(i, mx, x operand.Op) { ctx.PCMPESTRM(i, mx, x) }
|
|
|
|
// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTB xmm xmm
|
|
// PCMPGTB m128 xmm
|
|
// Construct and append a PCMPGTB instruction to the active function.
|
|
func (c *Context) PCMPGTB(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPGTB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTB xmm xmm
|
|
// PCMPGTB m128 xmm
|
|
// Construct and append a PCMPGTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTB(mx, x operand.Op) { ctx.PCMPGTB(mx, x) }
|
|
|
|
// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTL xmm xmm
|
|
// PCMPGTL m128 xmm
|
|
// Construct and append a PCMPGTL instruction to the active function.
|
|
func (c *Context) PCMPGTL(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPGTL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTL xmm xmm
|
|
// PCMPGTL m128 xmm
|
|
// Construct and append a PCMPGTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTL(mx, x operand.Op) { ctx.PCMPGTL(mx, x) }
|
|
|
|
// PCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTQ xmm xmm
|
|
// PCMPGTQ m128 xmm
|
|
// Construct and append a PCMPGTQ instruction to the active function.
|
|
func (c *Context) PCMPGTQ(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPGTQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTQ xmm xmm
|
|
// PCMPGTQ m128 xmm
|
|
// Construct and append a PCMPGTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTQ(mx, x operand.Op) { ctx.PCMPGTQ(mx, x) }
|
|
|
|
// PCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTW xmm xmm
|
|
// PCMPGTW m128 xmm
|
|
// Construct and append a PCMPGTW instruction to the active function.
|
|
func (c *Context) PCMPGTW(mx, x operand.Op) {
|
|
if inst, err := x86.PCMPGTW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTW xmm xmm
|
|
// PCMPGTW m128 xmm
|
|
// Construct and append a PCMPGTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTW(mx, x operand.Op) { ctx.PCMPGTW(mx, x) }
|
|
|
|
// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRI imm8 xmm xmm
|
|
// PCMPISTRI imm8 m128 xmm
|
|
// Construct and append a PCMPISTRI instruction to the active function.
|
|
func (c *Context) PCMPISTRI(i, mx, x operand.Op) {
|
|
if inst, err := x86.PCMPISTRI(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRI imm8 xmm xmm
|
|
// PCMPISTRI imm8 m128 xmm
|
|
// Construct and append a PCMPISTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPISTRI(i, mx, x operand.Op) { ctx.PCMPISTRI(i, mx, x) }
|
|
|
|
// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRM imm8 xmm xmm
|
|
// PCMPISTRM imm8 m128 xmm
|
|
// Construct and append a PCMPISTRM instruction to the active function.
|
|
func (c *Context) PCMPISTRM(i, mx, x operand.Op) {
|
|
if inst, err := x86.PCMPISTRM(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRM imm8 xmm xmm
|
|
// PCMPISTRM imm8 m128 xmm
|
|
// Construct and append a PCMPISTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPISTRM(i, mx, x operand.Op) { ctx.PCMPISTRM(i, mx, x) }
|
|
|
|
// PDEPL: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPL r32 r32 r32
|
|
// PDEPL m32 r32 r32
|
|
// Construct and append a PDEPL instruction to the active function.
|
|
func (c *Context) PDEPL(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.PDEPL(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PDEPL: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPL r32 r32 r32
|
|
// PDEPL m32 r32 r32
|
|
// Construct and append a PDEPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PDEPL(mr, r, r1 operand.Op) { ctx.PDEPL(mr, r, r1) }
|
|
|
|
// PDEPQ: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPQ r64 r64 r64
|
|
// PDEPQ m64 r64 r64
|
|
// Construct and append a PDEPQ instruction to the active function.
|
|
func (c *Context) PDEPQ(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.PDEPQ(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PDEPQ: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPQ r64 r64 r64
|
|
// PDEPQ m64 r64 r64
|
|
// Construct and append a PDEPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PDEPQ(mr, r, r1 operand.Op) { ctx.PDEPQ(mr, r, r1) }
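
// Illustrative usage sketch (not generated output): PDEPQ scatters the low
// bits of the source across the bit positions set in the mask (operands
// follow the Go assembler's reversed order). Assumes the GP64 helper and the
// MOVQ constructor from this package:
//
//	bits, mask, dst := GP64(), GP64(), GP64()
//	MOVQ(operand.Imm(0xB), bits)
//	MOVQ(operand.Imm(0xF0F0), mask)
//	PDEPQ(mask, bits, dst) // deposit the low bits of bits into the set positions of mask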
|
|
|
|
// PEXTL: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTL r32 r32 r32
|
|
// PEXTL m32 r32 r32
|
|
// Construct and append a PEXTL instruction to the active function.
|
|
func (c *Context) PEXTL(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.PEXTL(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PEXTL: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTL r32 r32 r32
|
|
// PEXTL m32 r32 r32
|
|
// Construct and append a PEXTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTL(mr, r, r1 operand.Op) { ctx.PEXTL(mr, r, r1) }
|
|
|
|
// PEXTQ: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTQ r64 r64 r64
|
|
// PEXTQ m64 r64 r64
|
|
// Construct and append a PEXTQ instruction to the active function.
|
|
func (c *Context) PEXTQ(mr, r, r1 operand.Op) {
|
|
if inst, err := x86.PEXTQ(mr, r, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PEXTQ: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTQ r64 r64 r64
|
|
// PEXTQ m64 r64 r64
|
|
// Construct and append a PEXTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTQ(mr, r, r1 operand.Op) { ctx.PEXTQ(mr, r, r1) }
|
|
|
|
// PEXTRB: Extract Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRB imm8 xmm r32
|
|
// PEXTRB imm8 xmm m8
|
|
// Construct and append a PEXTRB instruction to the active function.
|
|
func (c *Context) PEXTRB(i, x, mr operand.Op) {
|
|
if inst, err := x86.PEXTRB(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PEXTRB: Extract Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRB imm8 xmm r32
|
|
// PEXTRB imm8 xmm m8
|
|
// Construct and append a PEXTRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRB(i, x, mr operand.Op) { ctx.PEXTRB(i, x, mr) }
|
|
|
|
// PEXTRD: Extract Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRD imm8 xmm r32
|
|
// PEXTRD imm8 xmm m32
|
|
// Construct and append a PEXTRD instruction to the active function.
|
|
func (c *Context) PEXTRD(i, x, mr operand.Op) {
|
|
if inst, err := x86.PEXTRD(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PEXTRD: Extract Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRD imm8 xmm r32
|
|
// PEXTRD imm8 xmm m32
|
|
// Construct and append a PEXTRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRD(i, x, mr operand.Op) { ctx.PEXTRD(i, x, mr) }
|
|
|
|
// PEXTRQ: Extract Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRQ imm8 xmm r64
|
|
// PEXTRQ imm8 xmm m64
|
|
// Construct and append a PEXTRQ instruction to the active function.
|
|
func (c *Context) PEXTRQ(i, x, mr operand.Op) {
|
|
if inst, err := x86.PEXTRQ(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PEXTRQ: Extract Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRQ imm8 xmm r64
|
|
// PEXTRQ imm8 xmm m64
|
|
// Construct and append a PEXTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRQ(i, x, mr operand.Op) { ctx.PEXTRQ(i, x, mr) }
|
|
|
|
// PEXTRW: Extract Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRW imm8 xmm r32
|
|
// PEXTRW imm8 xmm m16
|
|
// Construct and append a PEXTRW instruction to the active function.
|
|
func (c *Context) PEXTRW(i, x, mr operand.Op) {
|
|
if inst, err := x86.PEXTRW(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PEXTRW: Extract Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRW imm8 xmm r32
|
|
// PEXTRW imm8 xmm m16
|
|
// Construct and append a PEXTRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRW(i, x, mr operand.Op) { ctx.PEXTRW(i, x, mr) }
|
|
|
|
// PHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDD xmm xmm
|
|
// PHADDD m128 xmm
|
|
// Construct and append a PHADDD instruction to the active function.
|
|
func (c *Context) PHADDD(mx, x operand.Op) {
|
|
if inst, err := x86.PHADDD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDD xmm xmm
|
|
// PHADDD m128 xmm
|
|
// Construct and append a PHADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHADDD(mx, x operand.Op) { ctx.PHADDD(mx, x) }
|
|
|
|
// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDSW xmm xmm
|
|
// PHADDSW m128 xmm
|
|
// Construct and append a PHADDSW instruction to the active function.
|
|
func (c *Context) PHADDSW(mx, x operand.Op) {
|
|
if inst, err := x86.PHADDSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDSW xmm xmm
|
|
// PHADDSW m128 xmm
|
|
// Construct and append a PHADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHADDSW(mx, x operand.Op) { ctx.PHADDSW(mx, x) }
|
|
|
|
// PHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDW xmm xmm
|
|
// PHADDW m128 xmm
|
|
// Construct and append a PHADDW instruction to the active function.
|
|
func (c *Context) PHADDW(mx, x operand.Op) {
|
|
if inst, err := x86.PHADDW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDW xmm xmm
|
|
// PHADDW m128 xmm
|
|
// Construct and append a PHADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHADDW(mx, x operand.Op) { ctx.PHADDW(mx, x) }
|
|
|
|
// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHMINPOSUW xmm xmm
|
|
// PHMINPOSUW m128 xmm
|
|
// Construct and append a PHMINPOSUW instruction to the active function.
|
|
func (c *Context) PHMINPOSUW(mx, x operand.Op) {
|
|
if inst, err := x86.PHMINPOSUW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHMINPOSUW xmm xmm
|
|
// PHMINPOSUW m128 xmm
|
|
// Construct and append a PHMINPOSUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHMINPOSUW(mx, x operand.Op) { ctx.PHMINPOSUW(mx, x) }
|
|
|
|
// PHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBD xmm xmm
|
|
// PHSUBD m128 xmm
|
|
// Construct and append a PHSUBD instruction to the active function.
|
|
func (c *Context) PHSUBD(mx, x operand.Op) {
|
|
if inst, err := x86.PHSUBD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBD xmm xmm
|
|
// PHSUBD m128 xmm
|
|
// Construct and append a PHSUBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHSUBD(mx, x operand.Op) { ctx.PHSUBD(mx, x) }
|
|
|
|
// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBSW xmm xmm
|
|
// PHSUBSW m128 xmm
|
|
// Construct and append a PHSUBSW instruction to the active function.
|
|
func (c *Context) PHSUBSW(mx, x operand.Op) {
|
|
if inst, err := x86.PHSUBSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBSW xmm xmm
|
|
// PHSUBSW m128 xmm
|
|
// Construct and append a PHSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHSUBSW(mx, x operand.Op) { ctx.PHSUBSW(mx, x) }
|
|
|
|
// PHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBW xmm xmm
|
|
// PHSUBW m128 xmm
|
|
// Construct and append a PHSUBW instruction to the active function.
|
|
func (c *Context) PHSUBW(mx, x operand.Op) {
|
|
if inst, err := x86.PHSUBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBW xmm xmm
|
|
// PHSUBW m128 xmm
|
|
// Construct and append a PHSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHSUBW(mx, x operand.Op) { ctx.PHSUBW(mx, x) }
|
|
|
|
// PINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRB imm8 r32 xmm
|
|
// PINSRB imm8 m8 xmm
|
|
// Construct and append a PINSRB instruction to the active function.
|
|
func (c *Context) PINSRB(i, mr, x operand.Op) {
|
|
if inst, err := x86.PINSRB(i, mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRB imm8 r32 xmm
|
|
// PINSRB imm8 m8 xmm
|
|
// Construct and append a PINSRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRB(i, mr, x operand.Op) { ctx.PINSRB(i, mr, x) }
|
|
|
|
// PINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRD imm8 r32 xmm
|
|
// PINSRD imm8 m32 xmm
|
|
// Construct and append a PINSRD instruction to the active function.
|
|
func (c *Context) PINSRD(i, mr, x operand.Op) {
|
|
if inst, err := x86.PINSRD(i, mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRD imm8 r32 xmm
|
|
// PINSRD imm8 m32 xmm
|
|
// Construct and append a PINSRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRD(i, mr, x operand.Op) { ctx.PINSRD(i, mr, x) }
|
|
|
|
// PINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRQ imm8 r64 xmm
|
|
// PINSRQ imm8 m64 xmm
|
|
// Construct and append a PINSRQ instruction to the active function.
|
|
func (c *Context) PINSRQ(i, mr, x operand.Op) {
|
|
if inst, err := x86.PINSRQ(i, mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRQ imm8 r64 xmm
|
|
// PINSRQ imm8 m64 xmm
|
|
// Construct and append a PINSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRQ(i, mr, x operand.Op) { ctx.PINSRQ(i, mr, x) }
|
|
|
|
// PINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRW imm8 r32 xmm
|
|
// PINSRW imm8 m16 xmm
|
|
// Construct and append a PINSRW instruction to the active function.
|
|
func (c *Context) PINSRW(i, mr, x operand.Op) {
|
|
if inst, err := x86.PINSRW(i, mr, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRW imm8 r32 xmm
|
|
// PINSRW imm8 m16 xmm
|
|
// Construct and append a PINSRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRW(i, mr, x operand.Op) { ctx.PINSRW(i, mr, x) }
|
|
|
|
// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDUBSW xmm xmm
|
|
// PMADDUBSW m128 xmm
|
|
// Construct and append a PMADDUBSW instruction to the active function.
|
|
func (c *Context) PMADDUBSW(mx, x operand.Op) {
|
|
if inst, err := x86.PMADDUBSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDUBSW xmm xmm
|
|
// PMADDUBSW m128 xmm
|
|
// Construct and append a PMADDUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMADDUBSW(mx, x operand.Op) { ctx.PMADDUBSW(mx, x) }
|
|
|
|
// PMADDWL: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDWL xmm xmm
|
|
// PMADDWL m128 xmm
|
|
// Construct and append a PMADDWL instruction to the active function.
|
|
func (c *Context) PMADDWL(mx, x operand.Op) {
|
|
if inst, err := x86.PMADDWL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMADDWL: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDWL xmm xmm
|
|
// PMADDWL m128 xmm
|
|
// Construct and append a PMADDWL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMADDWL(mx, x operand.Op) { ctx.PMADDWL(mx, x) }
|
|
|
|
// PMAXSB: Maximum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSB xmm xmm
|
|
// PMAXSB m128 xmm
|
|
// Construct and append a PMAXSB instruction to the active function.
|
|
func (c *Context) PMAXSB(mx, x operand.Op) {
|
|
if inst, err := x86.PMAXSB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMAXSB: Maximum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSB xmm xmm
|
|
// PMAXSB m128 xmm
|
|
// Construct and append a PMAXSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXSB(mx, x operand.Op) { ctx.PMAXSB(mx, x) }
|
|
|
|
// PMAXSD: Maximum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSD xmm xmm
|
|
// PMAXSD m128 xmm
|
|
// Construct and append a PMAXSD instruction to the active function.
|
|
func (c *Context) PMAXSD(mx, x operand.Op) {
|
|
if inst, err := x86.PMAXSD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMAXSD: Maximum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSD xmm xmm
|
|
// PMAXSD m128 xmm
|
|
// Construct and append a PMAXSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXSD(mx, x operand.Op) { ctx.PMAXSD(mx, x) }
|
|
|
|
// PMAXSW: Maximum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSW xmm xmm
|
|
// PMAXSW m128 xmm
|
|
// Construct and append a PMAXSW instruction to the active function.
|
|
func (c *Context) PMAXSW(mx, x operand.Op) {
|
|
if inst, err := x86.PMAXSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMAXSW: Maximum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSW xmm xmm
|
|
// PMAXSW m128 xmm
|
|
// Construct and append a PMAXSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXSW(mx, x operand.Op) { ctx.PMAXSW(mx, x) }
|
|
|
|
// PMAXUB: Maximum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUB xmm xmm
|
|
// PMAXUB m128 xmm
|
|
// Construct and append a PMAXUB instruction to the active function.
|
|
func (c *Context) PMAXUB(mx, x operand.Op) {
|
|
if inst, err := x86.PMAXUB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMAXUB: Maximum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUB xmm xmm
|
|
// PMAXUB m128 xmm
|
|
// Construct and append a PMAXUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXUB(mx, x operand.Op) { ctx.PMAXUB(mx, x) }
|
|
|
|
// PMAXUD: Maximum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUD xmm xmm
|
|
// PMAXUD m128 xmm
|
|
// Construct and append a PMAXUD instruction to the active function.
|
|
func (c *Context) PMAXUD(mx, x operand.Op) {
|
|
if inst, err := x86.PMAXUD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMAXUD: Maximum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUD xmm xmm
|
|
// PMAXUD m128 xmm
|
|
// Construct and append a PMAXUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXUD(mx, x operand.Op) { ctx.PMAXUD(mx, x) }
|
|
|
|
// PMAXUW: Maximum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUW xmm xmm
|
|
// PMAXUW m128 xmm
|
|
// Construct and append a PMAXUW instruction to the active function.
|
|
func (c *Context) PMAXUW(mx, x operand.Op) {
|
|
if inst, err := x86.PMAXUW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMAXUW: Maximum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUW xmm xmm
|
|
// PMAXUW m128 xmm
|
|
// Construct and append a PMAXUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXUW(mx, x operand.Op) { ctx.PMAXUW(mx, x) }
|
|
|
|
// PMINSB: Minimum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSB xmm xmm
|
|
// PMINSB m128 xmm
|
|
// Construct and append a PMINSB instruction to the active function.
|
|
func (c *Context) PMINSB(mx, x operand.Op) {
|
|
if inst, err := x86.PMINSB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMINSB: Minimum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSB xmm xmm
|
|
// PMINSB m128 xmm
|
|
// Construct and append a PMINSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINSB(mx, x operand.Op) { ctx.PMINSB(mx, x) }
|
|
|
|
// PMINSD: Minimum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSD xmm xmm
|
|
// PMINSD m128 xmm
|
|
// Construct and append a PMINSD instruction to the active function.
|
|
func (c *Context) PMINSD(mx, x operand.Op) {
|
|
if inst, err := x86.PMINSD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMINSD: Minimum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSD xmm xmm
|
|
// PMINSD m128 xmm
|
|
// Construct and append a PMINSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINSD(mx, x operand.Op) { ctx.PMINSD(mx, x) }
|
|
|
|
// PMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSW xmm xmm
|
|
// PMINSW m128 xmm
|
|
// Construct and append a PMINSW instruction to the active function.
|
|
func (c *Context) PMINSW(mx, x operand.Op) {
|
|
if inst, err := x86.PMINSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSW xmm xmm
|
|
// PMINSW m128 xmm
|
|
// Construct and append a PMINSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINSW(mx, x operand.Op) { ctx.PMINSW(mx, x) }
|
|
|
|
// PMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUB xmm xmm
|
|
// PMINUB m128 xmm
|
|
// Construct and append a PMINUB instruction to the active function.
|
|
func (c *Context) PMINUB(mx, x operand.Op) {
|
|
if inst, err := x86.PMINUB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUB xmm xmm
|
|
// PMINUB m128 xmm
|
|
// Construct and append a PMINUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINUB(mx, x operand.Op) { ctx.PMINUB(mx, x) }
|
|
|
|
// PMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUD xmm xmm
|
|
// PMINUD m128 xmm
|
|
// Construct and append a PMINUD instruction to the active function.
|
|
func (c *Context) PMINUD(mx, x operand.Op) {
|
|
if inst, err := x86.PMINUD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUD xmm xmm
|
|
// PMINUD m128 xmm
|
|
// Construct and append a PMINUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINUD(mx, x operand.Op) { ctx.PMINUD(mx, x) }
|
|
|
|
// PMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUW xmm xmm
|
|
// PMINUW m128 xmm
|
|
// Construct and append a PMINUW instruction to the active function.
|
|
func (c *Context) PMINUW(mx, x operand.Op) {
|
|
if inst, err := x86.PMINUW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUW xmm xmm
|
|
// PMINUW m128 xmm
|
|
// Construct and append a PMINUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINUW(mx, x operand.Op) { ctx.PMINUW(mx, x) }
|
|
|
|
// PMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVMSKB xmm r32
|
|
// Construct and append a PMOVMSKB instruction to the active function.
|
|
func (c *Context) PMOVMSKB(x, r operand.Op) {
|
|
if inst, err := x86.PMOVMSKB(x, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVMSKB xmm r32
|
|
// Construct and append a PMOVMSKB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVMSKB(x, r operand.Op) { ctx.PMOVMSKB(x, r) }
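
// Illustrative usage sketch (not generated output): PMOVMSKB packs the most
// significant bit of each byte lane into a general-purpose register, a common
// building block after byte-wise compares. Assumes the XMM and GP32 helpers
// from this package:
//
//	v := XMM()
//	m := GP32()
//	// ... e.g. PCMPEQB(other, v) ...
//	PMOVMSKB(v, m) // bit i of m = top bit of byte i of v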
|
|
|
|
// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXBD xmm xmm
|
|
// PMOVSXBD m32 xmm
|
|
// Construct and append a PMOVSXBD instruction to the active function.
|
|
func (c *Context) PMOVSXBD(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVSXBD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXBD xmm xmm
|
|
// PMOVSXBD m32 xmm
|
|
// Construct and append a PMOVSXBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVSXBD(mx, x operand.Op) { ctx.PMOVSXBD(mx, x) }
|
|
|
|
// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXBQ xmm xmm
|
|
// PMOVSXBQ m16 xmm
|
|
// Construct and append a PMOVSXBQ instruction to the active function.
|
|
func (c *Context) PMOVSXBQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVSXBQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXBQ xmm xmm
|
|
// PMOVSXBQ m16 xmm
|
|
// Construct and append a PMOVSXBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVSXBQ(mx, x operand.Op) { ctx.PMOVSXBQ(mx, x) }
|
|
|
|
// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXBW xmm xmm
|
|
// PMOVSXBW m64 xmm
|
|
// Construct and append a PMOVSXBW instruction to the active function.
|
|
func (c *Context) PMOVSXBW(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVSXBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXBW xmm xmm
|
|
// PMOVSXBW m64 xmm
|
|
// Construct and append a PMOVSXBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVSXBW(mx, x operand.Op) { ctx.PMOVSXBW(mx, x) }
|
|
|
|
// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXDQ xmm xmm
|
|
// PMOVSXDQ m64 xmm
|
|
// Construct and append a PMOVSXDQ instruction to the active function.
|
|
func (c *Context) PMOVSXDQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVSXDQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXDQ xmm xmm
|
|
// PMOVSXDQ m64 xmm
|
|
// Construct and append a PMOVSXDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVSXDQ(mx, x operand.Op) { ctx.PMOVSXDQ(mx, x) }
|
|
|
|
// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXWD xmm xmm
|
|
// PMOVSXWD m64 xmm
|
|
// Construct and append a PMOVSXWD instruction to the active function.
|
|
func (c *Context) PMOVSXWD(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVSXWD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXWD xmm xmm
|
|
// PMOVSXWD m64 xmm
|
|
// Construct and append a PMOVSXWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVSXWD(mx, x operand.Op) { ctx.PMOVSXWD(mx, x) }
|
|
|
|
// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXWQ xmm xmm
|
|
// PMOVSXWQ m32 xmm
|
|
// Construct and append a PMOVSXWQ instruction to the active function.
|
|
func (c *Context) PMOVSXWQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVSXWQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVSXWQ xmm xmm
|
|
// PMOVSXWQ m32 xmm
|
|
// Construct and append a PMOVSXWQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVSXWQ(mx, x operand.Op) { ctx.PMOVSXWQ(mx, x) }
|
|
|
|
// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXBD xmm xmm
|
|
// PMOVZXBD m32 xmm
|
|
// Construct and append a PMOVZXBD instruction to the active function.
|
|
func (c *Context) PMOVZXBD(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVZXBD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXBD xmm xmm
|
|
// PMOVZXBD m32 xmm
|
|
// Construct and append a PMOVZXBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVZXBD(mx, x operand.Op) { ctx.PMOVZXBD(mx, x) }
|
|
|
|
// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXBQ xmm xmm
|
|
// PMOVZXBQ m16 xmm
|
|
// Construct and append a PMOVZXBQ instruction to the active function.
|
|
func (c *Context) PMOVZXBQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVZXBQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXBQ xmm xmm
|
|
// PMOVZXBQ m16 xmm
|
|
// Construct and append a PMOVZXBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVZXBQ(mx, x operand.Op) { ctx.PMOVZXBQ(mx, x) }
|
|
|
|
// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXBW xmm xmm
|
|
// PMOVZXBW m64 xmm
|
|
// Construct and append a PMOVZXBW instruction to the active function.
|
|
func (c *Context) PMOVZXBW(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVZXBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXBW xmm xmm
|
|
// PMOVZXBW m64 xmm
|
|
// Construct and append a PMOVZXBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVZXBW(mx, x operand.Op) { ctx.PMOVZXBW(mx, x) }
|
|
|
|
// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXDQ xmm xmm
|
|
// PMOVZXDQ m64 xmm
|
|
// Construct and append a PMOVZXDQ instruction to the active function.
|
|
func (c *Context) PMOVZXDQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVZXDQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXDQ xmm xmm
|
|
// PMOVZXDQ m64 xmm
|
|
// Construct and append a PMOVZXDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVZXDQ(mx, x operand.Op) { ctx.PMOVZXDQ(mx, x) }
|
|
|
|
// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXWD xmm xmm
|
|
// PMOVZXWD m64 xmm
|
|
// Construct and append a PMOVZXWD instruction to the active function.
|
|
func (c *Context) PMOVZXWD(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVZXWD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXWD xmm xmm
|
|
// PMOVZXWD m64 xmm
|
|
// Construct and append a PMOVZXWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVZXWD(mx, x operand.Op) { ctx.PMOVZXWD(mx, x) }
|
|
|
|
// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXWQ xmm xmm
|
|
// PMOVZXWQ m32 xmm
|
|
// Construct and append a PMOVZXWQ instruction to the active function.
|
|
func (c *Context) PMOVZXWQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMOVZXWQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVZXWQ xmm xmm
|
|
// PMOVZXWQ m32 xmm
|
|
// Construct and append a PMOVZXWQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVZXWQ(mx, x operand.Op) { ctx.PMOVZXWQ(mx, x) }
|
|
|
|
// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULDQ xmm xmm
|
|
// PMULDQ m128 xmm
|
|
// Construct and append a PMULDQ instruction to the active function.
|
|
func (c *Context) PMULDQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMULDQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULDQ xmm xmm
|
|
// PMULDQ m128 xmm
|
|
// Construct and append a PMULDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULDQ(mx, x operand.Op) { ctx.PMULDQ(mx, x) }
|
|
|
|
// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULHRSW xmm xmm
|
|
// PMULHRSW m128 xmm
|
|
// Construct and append a PMULHRSW instruction to the active function.
|
|
func (c *Context) PMULHRSW(mx, x operand.Op) {
|
|
if inst, err := x86.PMULHRSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULHRSW xmm xmm
|
|
// PMULHRSW m128 xmm
|
|
// Construct and append a PMULHRSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULHRSW(mx, x operand.Op) { ctx.PMULHRSW(mx, x) }
|
|
|
|
// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULHUW xmm xmm
|
|
// PMULHUW m128 xmm
|
|
// Construct and append a PMULHUW instruction to the active function.
|
|
func (c *Context) PMULHUW(mx, x operand.Op) {
|
|
if inst, err := x86.PMULHUW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULHUW xmm xmm
|
|
// PMULHUW m128 xmm
|
|
// Construct and append a PMULHUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULHUW(mx, x operand.Op) { ctx.PMULHUW(mx, x) }
|
|
|
|
// PMULHW: Multiply Packed Signed Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULHW xmm xmm
|
|
// PMULHW m128 xmm
|
|
// Construct and append a PMULHW instruction to the active function.
|
|
func (c *Context) PMULHW(mx, x operand.Op) {
|
|
if inst, err := x86.PMULHW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULHW: Multiply Packed Signed Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULHW xmm xmm
|
|
// PMULHW m128 xmm
|
|
// Construct and append a PMULHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULHW(mx, x operand.Op) { ctx.PMULHW(mx, x) }
|
|
|
|
// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULLD xmm xmm
|
|
// PMULLD m128 xmm
|
|
// Construct and append a PMULLD instruction to the active function.
|
|
func (c *Context) PMULLD(mx, x operand.Op) {
|
|
if inst, err := x86.PMULLD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULLD xmm xmm
|
|
// PMULLD m128 xmm
|
|
// Construct and append a PMULLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULLD(mx, x operand.Op) { ctx.PMULLD(mx, x) }
|
|
|
|
// PMULLW: Multiply Packed Signed Word Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULLW xmm xmm
|
|
// PMULLW m128 xmm
|
|
// Construct and append a PMULLW instruction to the active function.
|
|
func (c *Context) PMULLW(mx, x operand.Op) {
|
|
if inst, err := x86.PMULLW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULLW: Multiply Packed Signed Word Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULLW xmm xmm
|
|
// PMULLW m128 xmm
|
|
// Construct and append a PMULLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULLW(mx, x operand.Op) { ctx.PMULLW(mx, x) }
|
|
|
|
// PMULULQ: Multiply Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULULQ xmm xmm
|
|
// PMULULQ m128 xmm
|
|
// Construct and append a PMULULQ instruction to the active function.
|
|
func (c *Context) PMULULQ(mx, x operand.Op) {
|
|
if inst, err := x86.PMULULQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PMULULQ: Multiply Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMULULQ xmm xmm
|
|
// PMULULQ m128 xmm
|
|
// Construct and append a PMULULQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMULULQ(mx, x operand.Op) { ctx.PMULULQ(mx, x) }
|
|
|
|
// POPCNTL: Count of Number of Bits Set to 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPCNTL r32 r32
|
|
// POPCNTL m32 r32
|
|
// Construct and append a POPCNTL instruction to the active function.
|
|
func (c *Context) POPCNTL(mr, r operand.Op) {
|
|
if inst, err := x86.POPCNTL(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// POPCNTL: Count of Number of Bits Set to 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPCNTL r32 r32
|
|
// POPCNTL m32 r32
|
|
// Construct and append a POPCNTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func POPCNTL(mr, r operand.Op) { ctx.POPCNTL(mr, r) }
|
|
|
|
// POPCNTQ: Count of Number of Bits Set to 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPCNTQ r64 r64
|
|
// POPCNTQ m64 r64
|
|
// Construct and append a POPCNTQ instruction to the active function.
|
|
func (c *Context) POPCNTQ(mr, r operand.Op) {
|
|
if inst, err := x86.POPCNTQ(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// POPCNTQ: Count of Number of Bits Set to 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPCNTQ r64 r64
|
|
// POPCNTQ m64 r64
|
|
// Construct and append a POPCNTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func POPCNTQ(mr, r operand.Op) { ctx.POPCNTQ(mr, r) }
|
|
|
|
// POPCNTW: Count of Number of Bits Set to 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPCNTW r16 r16
|
|
// POPCNTW m16 r16
|
|
// Construct and append a POPCNTW instruction to the active function.
|
|
func (c *Context) POPCNTW(mr, r operand.Op) {
|
|
if inst, err := x86.POPCNTW(mr, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// POPCNTW: Count of Number of Bits Set to 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPCNTW r16 r16
|
|
// POPCNTW m16 r16
|
|
// Construct and append a POPCNTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func POPCNTW(mr, r operand.Op) { ctx.POPCNTW(mr, r) }
|
|
|
|
// POPQ: Pop a Value from the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPQ r64
|
|
// POPQ m64
|
|
// Construct and append a POPQ instruction to the active function.
|
|
func (c *Context) POPQ(mr operand.Op) {
|
|
if inst, err := x86.POPQ(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// POPQ: Pop a Value from the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPQ r64
|
|
// POPQ m64
|
|
// Construct and append a POPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func POPQ(mr operand.Op) { ctx.POPQ(mr) }
|
|
|
|
// POPW: Pop a Value from the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPW r16
|
|
// POPW m16
|
|
// Construct and append a POPW instruction to the active function.
|
|
func (c *Context) POPW(mr operand.Op) {
|
|
if inst, err := x86.POPW(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// POPW: Pop a Value from the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POPW r16
|
|
// POPW m16
|
|
// Construct and append a POPW instruction to the active function.
|
|
// Operates on the global context.
|
|
func POPW(mr operand.Op) { ctx.POPW(mr) }
|
|
|
|
// POR: Packed Bitwise Logical OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POR xmm xmm
|
|
// POR m128 xmm
|
|
// Construct and append a POR instruction to the active function.
|
|
func (c *Context) POR(mx, x operand.Op) {
|
|
if inst, err := x86.POR(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// POR: Packed Bitwise Logical OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// POR xmm xmm
|
|
// POR m128 xmm
|
|
// Construct and append a POR instruction to the active function.
|
|
// Operates on the global context.
|
|
func POR(mx, x operand.Op) { ctx.POR(mx, x) }
|
|
|
|
// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHNTA m8
|
|
// Construct and append a PREFETCHNTA instruction to the active function.
|
|
func (c *Context) PREFETCHNTA(m operand.Op) {
|
|
if inst, err := x86.PREFETCHNTA(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHNTA m8
|
|
// Construct and append a PREFETCHNTA instruction to the active function.
|
|
// Operates on the global context.
|
|
func PREFETCHNTA(m operand.Op) { ctx.PREFETCHNTA(m) }
|
|
|
|
// PREFETCHT0: Prefetch Data Into Caches using T0 Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHT0 m8
|
|
// Construct and append a PREFETCHT0 instruction to the active function.
|
|
func (c *Context) PREFETCHT0(m operand.Op) {
|
|
if inst, err := x86.PREFETCHT0(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PREFETCHT0: Prefetch Data Into Caches using T0 Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHT0 m8
|
|
// Construct and append a PREFETCHT0 instruction to the active function.
|
|
// Operates on the global context.
|
|
func PREFETCHT0(m operand.Op) { ctx.PREFETCHT0(m) }
|
|
|
|
// PREFETCHT1: Prefetch Data Into Caches using T1 Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHT1 m8
|
|
// Construct and append a PREFETCHT1 instruction to the active function.
|
|
func (c *Context) PREFETCHT1(m operand.Op) {
|
|
if inst, err := x86.PREFETCHT1(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PREFETCHT1: Prefetch Data Into Caches using T1 Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHT1 m8
|
|
// Construct and append a PREFETCHT1 instruction to the active function.
|
|
// Operates on the global context.
|
|
func PREFETCHT1(m operand.Op) { ctx.PREFETCHT1(m) }
|
|
|
|
// PREFETCHT2: Prefetch Data Into Caches using T2 Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHT2 m8
|
|
// Construct and append a PREFETCHT2 instruction to the active function.
|
|
func (c *Context) PREFETCHT2(m operand.Op) {
|
|
if inst, err := x86.PREFETCHT2(m); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PREFETCHT2: Prefetch Data Into Caches using T2 Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PREFETCHT2 m8
|
|
// Construct and append a PREFETCHT2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func PREFETCHT2(m operand.Op) { ctx.PREFETCHT2(m) }
|
|
|
|
// PSADBW: Compute Sum of Absolute Differences.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSADBW xmm xmm
|
|
// PSADBW m128 xmm
|
|
// Construct and append a PSADBW instruction to the active function.
|
|
func (c *Context) PSADBW(mx, x operand.Op) {
|
|
if inst, err := x86.PSADBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSADBW: Compute Sum of Absolute Differences.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSADBW xmm xmm
|
|
// PSADBW m128 xmm
|
|
// Construct and append a PSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSADBW(mx, x operand.Op) { ctx.PSADBW(mx, x) }
|
|
|
|
// PSHUFB: Packed Shuffle Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFB xmm xmm
|
|
// PSHUFB m128 xmm
|
|
// Construct and append a PSHUFB instruction to the active function.
|
|
func (c *Context) PSHUFB(mx, x operand.Op) {
|
|
if inst, err := x86.PSHUFB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSHUFB: Packed Shuffle Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFB xmm xmm
|
|
// PSHUFB m128 xmm
|
|
// Construct and append a PSHUFB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSHUFB(mx, x operand.Op) { ctx.PSHUFB(mx, x) }
|
|
|
|
// PSHUFD: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFD imm8 xmm xmm
|
|
// PSHUFD imm8 m128 xmm
|
|
// Construct and append a PSHUFD instruction to the active function.
|
|
func (c *Context) PSHUFD(i, mx, x operand.Op) {
|
|
if inst, err := x86.PSHUFD(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSHUFD: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFD imm8 xmm xmm
|
|
// PSHUFD imm8 m128 xmm
|
|
// Construct and append a PSHUFD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSHUFD(i, mx, x operand.Op) { ctx.PSHUFD(i, mx, x) }
|
|
|
|
// PSHUFHW: Shuffle Packed High Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFHW imm8 xmm xmm
|
|
// PSHUFHW imm8 m128 xmm
|
|
// Construct and append a PSHUFHW instruction to the active function.
|
|
func (c *Context) PSHUFHW(i, mx, x operand.Op) {
|
|
if inst, err := x86.PSHUFHW(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSHUFHW: Shuffle Packed High Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFHW imm8 xmm xmm
|
|
// PSHUFHW imm8 m128 xmm
|
|
// Construct and append a PSHUFHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSHUFHW(i, mx, x operand.Op) { ctx.PSHUFHW(i, mx, x) }
|
|
|
|
// PSHUFL: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFL imm8 xmm xmm
|
|
// PSHUFL imm8 m128 xmm
|
|
// Construct and append a PSHUFL instruction to the active function.
|
|
func (c *Context) PSHUFL(i, mx, x operand.Op) {
|
|
if inst, err := x86.PSHUFL(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSHUFL: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFL imm8 xmm xmm
|
|
// PSHUFL imm8 m128 xmm
|
|
// Construct and append a PSHUFL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSHUFL(i, mx, x operand.Op) { ctx.PSHUFL(i, mx, x) }
|
|
|
|
// PSHUFLW: Shuffle Packed Low Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFLW imm8 xmm xmm
|
|
// PSHUFLW imm8 m128 xmm
|
|
// Construct and append a PSHUFLW instruction to the active function.
|
|
func (c *Context) PSHUFLW(i, mx, x operand.Op) {
|
|
if inst, err := x86.PSHUFLW(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSHUFLW: Shuffle Packed Low Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSHUFLW imm8 xmm xmm
|
|
// PSHUFLW imm8 m128 xmm
|
|
// Construct and append a PSHUFLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSHUFLW(i, mx, x operand.Op) { ctx.PSHUFLW(i, mx, x) }
|
|
|
|
// PSIGNB: Packed Sign of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSIGNB xmm xmm
|
|
// PSIGNB m128 xmm
|
|
// Construct and append a PSIGNB instruction to the active function.
|
|
func (c *Context) PSIGNB(mx, x operand.Op) {
|
|
if inst, err := x86.PSIGNB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSIGNB: Packed Sign of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSIGNB xmm xmm
|
|
// PSIGNB m128 xmm
|
|
// Construct and append a PSIGNB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSIGNB(mx, x operand.Op) { ctx.PSIGNB(mx, x) }
|
|
|
|
// PSIGND: Packed Sign of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSIGND xmm xmm
|
|
// PSIGND m128 xmm
|
|
// Construct and append a PSIGND instruction to the active function.
|
|
func (c *Context) PSIGND(mx, x operand.Op) {
|
|
if inst, err := x86.PSIGND(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSIGND: Packed Sign of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSIGND xmm xmm
|
|
// PSIGND m128 xmm
|
|
// Construct and append a PSIGND instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSIGND(mx, x operand.Op) { ctx.PSIGND(mx, x) }
|
|
|
|
// PSIGNW: Packed Sign of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSIGNW xmm xmm
|
|
// PSIGNW m128 xmm
|
|
// Construct and append a PSIGNW instruction to the active function.
|
|
func (c *Context) PSIGNW(mx, x operand.Op) {
|
|
if inst, err := x86.PSIGNW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSIGNW: Packed Sign of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSIGNW xmm xmm
|
|
// PSIGNW m128 xmm
|
|
// Construct and append a PSIGNW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSIGNW(mx, x operand.Op) { ctx.PSIGNW(mx, x) }
|
|
|
|
// PSLLDQ: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLDQ imm8 xmm
|
|
// Construct and append a PSLLDQ instruction to the active function.
|
|
func (c *Context) PSLLDQ(i, x operand.Op) {
|
|
if inst, err := x86.PSLLDQ(i, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSLLDQ: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLDQ imm8 xmm
|
|
// Construct and append a PSLLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSLLDQ(i, x operand.Op) { ctx.PSLLDQ(i, x) }
|
|
|
|
// PSLLL: Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLL imm8 xmm
|
|
// PSLLL xmm xmm
|
|
// PSLLL m128 xmm
|
|
// Construct and append a PSLLL instruction to the active function.
|
|
func (c *Context) PSLLL(imx, x operand.Op) {
|
|
if inst, err := x86.PSLLL(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSLLL: Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLL imm8 xmm
|
|
// PSLLL xmm xmm
|
|
// PSLLL m128 xmm
|
|
// Construct and append a PSLLL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSLLL(imx, x operand.Op) { ctx.PSLLL(imx, x) }
|
|
|
|
// PSLLO: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLO imm8 xmm
|
|
// Construct and append a PSLLO instruction to the active function.
|
|
func (c *Context) PSLLO(i, x operand.Op) {
|
|
if inst, err := x86.PSLLO(i, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSLLO: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLO imm8 xmm
|
|
// Construct and append a PSLLO instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSLLO(i, x operand.Op) { ctx.PSLLO(i, x) }
|
|
|
|
// PSLLQ: Shift Packed Quadword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLQ imm8 xmm
|
|
// PSLLQ xmm xmm
|
|
// PSLLQ m128 xmm
|
|
// Construct and append a PSLLQ instruction to the active function.
|
|
func (c *Context) PSLLQ(imx, x operand.Op) {
|
|
if inst, err := x86.PSLLQ(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSLLQ: Shift Packed Quadword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLQ imm8 xmm
|
|
// PSLLQ xmm xmm
|
|
// PSLLQ m128 xmm
|
|
// Construct and append a PSLLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSLLQ(imx, x operand.Op) { ctx.PSLLQ(imx, x) }
|
|
|
|
// PSLLW: Shift Packed Word Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLW imm8 xmm
|
|
// PSLLW xmm xmm
|
|
// PSLLW m128 xmm
|
|
// Construct and append a PSLLW instruction to the active function.
|
|
func (c *Context) PSLLW(imx, x operand.Op) {
|
|
if inst, err := x86.PSLLW(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSLLW: Shift Packed Word Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSLLW imm8 xmm
|
|
// PSLLW xmm xmm
|
|
// PSLLW m128 xmm
|
|
// Construct and append a PSLLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSLLW(imx, x operand.Op) { ctx.PSLLW(imx, x) }
|
|
|
|
// PSRAL: Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRAL imm8 xmm
|
|
// PSRAL xmm xmm
|
|
// PSRAL m128 xmm
|
|
// Construct and append a PSRAL instruction to the active function.
|
|
func (c *Context) PSRAL(imx, x operand.Op) {
|
|
if inst, err := x86.PSRAL(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRAL: Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRAL imm8 xmm
|
|
// PSRAL xmm xmm
|
|
// PSRAL m128 xmm
|
|
// Construct and append a PSRAL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRAL(imx, x operand.Op) { ctx.PSRAL(imx, x) }
|
|
|
|
// PSRAW: Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRAW imm8 xmm
|
|
// PSRAW xmm xmm
|
|
// PSRAW m128 xmm
|
|
// Construct and append a PSRAW instruction to the active function.
|
|
func (c *Context) PSRAW(imx, x operand.Op) {
|
|
if inst, err := x86.PSRAW(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRAW: Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRAW imm8 xmm
|
|
// PSRAW xmm xmm
|
|
// PSRAW m128 xmm
|
|
// Construct and append a PSRAW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRAW(imx, x operand.Op) { ctx.PSRAW(imx, x) }
|
|
|
|
// PSRLDQ: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLDQ imm8 xmm
|
|
// Construct and append a PSRLDQ instruction to the active function.
|
|
func (c *Context) PSRLDQ(i, x operand.Op) {
|
|
if inst, err := x86.PSRLDQ(i, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRLDQ: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLDQ imm8 xmm
|
|
// Construct and append a PSRLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRLDQ(i, x operand.Op) { ctx.PSRLDQ(i, x) }
|
|
|
|
// PSRLL: Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLL imm8 xmm
|
|
// PSRLL xmm xmm
|
|
// PSRLL m128 xmm
|
|
// Construct and append a PSRLL instruction to the active function.
|
|
func (c *Context) PSRLL(imx, x operand.Op) {
|
|
if inst, err := x86.PSRLL(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRLL: Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLL imm8 xmm
|
|
// PSRLL xmm xmm
|
|
// PSRLL m128 xmm
|
|
// Construct and append a PSRLL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRLL(imx, x operand.Op) { ctx.PSRLL(imx, x) }
|
|
|
|
// PSRLO: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLO imm8 xmm
|
|
// Construct and append a PSRLO instruction to the active function.
|
|
func (c *Context) PSRLO(i, x operand.Op) {
|
|
if inst, err := x86.PSRLO(i, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRLO: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLO imm8 xmm
|
|
// Construct and append a PSRLO instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRLO(i, x operand.Op) { ctx.PSRLO(i, x) }
|
|
|
|
// PSRLQ: Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLQ imm8 xmm
|
|
// PSRLQ xmm xmm
|
|
// PSRLQ m128 xmm
|
|
// Construct and append a PSRLQ instruction to the active function.
|
|
func (c *Context) PSRLQ(imx, x operand.Op) {
|
|
if inst, err := x86.PSRLQ(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRLQ: Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLQ imm8 xmm
|
|
// PSRLQ xmm xmm
|
|
// PSRLQ m128 xmm
|
|
// Construct and append a PSRLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRLQ(imx, x operand.Op) { ctx.PSRLQ(imx, x) }
|
|
|
|
// PSRLW: Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLW imm8 xmm
|
|
// PSRLW xmm xmm
|
|
// PSRLW m128 xmm
|
|
// Construct and append a PSRLW instruction to the active function.
|
|
func (c *Context) PSRLW(imx, x operand.Op) {
|
|
if inst, err := x86.PSRLW(imx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSRLW: Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSRLW imm8 xmm
|
|
// PSRLW xmm xmm
|
|
// PSRLW m128 xmm
|
|
// Construct and append a PSRLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSRLW(imx, x operand.Op) { ctx.PSRLW(imx, x) }
|
|
|
|
// PSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBB xmm xmm
|
|
// PSUBB m128 xmm
|
|
// Construct and append a PSUBB instruction to the active function.
|
|
func (c *Context) PSUBB(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBB xmm xmm
|
|
// PSUBB m128 xmm
|
|
// Construct and append a PSUBB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBB(mx, x operand.Op) { ctx.PSUBB(mx, x) }
|
|
|
|
// PSUBL: Subtract Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBL xmm xmm
|
|
// PSUBL m128 xmm
|
|
// Construct and append a PSUBL instruction to the active function.
|
|
func (c *Context) PSUBL(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBL: Subtract Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBL xmm xmm
|
|
// PSUBL m128 xmm
|
|
// Construct and append a PSUBL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBL(mx, x operand.Op) { ctx.PSUBL(mx, x) }
|
|
|
|
// PSUBQ: Subtract Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBQ xmm xmm
|
|
// PSUBQ m128 xmm
|
|
// Construct and append a PSUBQ instruction to the active function.
|
|
func (c *Context) PSUBQ(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBQ: Subtract Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBQ xmm xmm
|
|
// PSUBQ m128 xmm
|
|
// Construct and append a PSUBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBQ(mx, x operand.Op) { ctx.PSUBQ(mx, x) }
|
|
|
|
// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSB xmm xmm
|
|
// PSUBSB m128 xmm
|
|
// Construct and append a PSUBSB instruction to the active function.
|
|
func (c *Context) PSUBSB(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBSB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSB xmm xmm
|
|
// PSUBSB m128 xmm
|
|
// Construct and append a PSUBSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBSB(mx, x operand.Op) { ctx.PSUBSB(mx, x) }
|
|
|
|
// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSW xmm xmm
|
|
// PSUBSW m128 xmm
|
|
// Construct and append a PSUBSW instruction to the active function.
|
|
func (c *Context) PSUBSW(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSW xmm xmm
|
|
// PSUBSW m128 xmm
|
|
// Construct and append a PSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBSW(mx, x operand.Op) { ctx.PSUBSW(mx, x) }
|
|
|
|
// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSB xmm xmm
|
|
// PSUBUSB m128 xmm
|
|
// Construct and append a PSUBUSB instruction to the active function.
|
|
func (c *Context) PSUBUSB(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBUSB(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSB xmm xmm
|
|
// PSUBUSB m128 xmm
|
|
// Construct and append a PSUBUSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBUSB(mx, x operand.Op) { ctx.PSUBUSB(mx, x) }
|
|
|
|
// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSW xmm xmm
|
|
// PSUBUSW m128 xmm
|
|
// Construct and append a PSUBUSW instruction to the active function.
|
|
func (c *Context) PSUBUSW(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBUSW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSW xmm xmm
|
|
// PSUBUSW m128 xmm
|
|
// Construct and append a PSUBUSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBUSW(mx, x operand.Op) { ctx.PSUBUSW(mx, x) }
|
|
|
|
// PSUBW: Subtract Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBW xmm xmm
|
|
// PSUBW m128 xmm
|
|
// Construct and append a PSUBW instruction to the active function.
|
|
func (c *Context) PSUBW(mx, x operand.Op) {
|
|
if inst, err := x86.PSUBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PSUBW: Subtract Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBW xmm xmm
|
|
// PSUBW m128 xmm
|
|
// Construct and append a PSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBW(mx, x operand.Op) { ctx.PSUBW(mx, x) }
|
|
|
|
// PTEST: Packed Logical Compare.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PTEST xmm xmm
|
|
// PTEST m128 xmm
|
|
// Construct and append a PTEST instruction to the active function.
|
|
func (c *Context) PTEST(mx, x operand.Op) {
|
|
if inst, err := x86.PTEST(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PTEST: Packed Logical Compare.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PTEST xmm xmm
|
|
// PTEST m128 xmm
|
|
// Construct and append a PTEST instruction to the active function.
|
|
// Operates on the global context.
|
|
func PTEST(mx, x operand.Op) { ctx.PTEST(mx, x) }
|
|
|
|
// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHBW xmm xmm
|
|
// PUNPCKHBW m128 xmm
|
|
// Construct and append a PUNPCKHBW instruction to the active function.
|
|
func (c *Context) PUNPCKHBW(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKHBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHBW xmm xmm
|
|
// PUNPCKHBW m128 xmm
|
|
// Construct and append a PUNPCKHBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHBW(mx, x operand.Op) { ctx.PUNPCKHBW(mx, x) }
|
|
|
|
// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHLQ xmm xmm
|
|
// PUNPCKHLQ m128 xmm
|
|
// Construct and append a PUNPCKHLQ instruction to the active function.
|
|
func (c *Context) PUNPCKHLQ(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKHLQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHLQ xmm xmm
|
|
// PUNPCKHLQ m128 xmm
|
|
// Construct and append a PUNPCKHLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHLQ(mx, x operand.Op) { ctx.PUNPCKHLQ(mx, x) }
|
|
|
|
// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHQDQ xmm xmm
|
|
// PUNPCKHQDQ m128 xmm
|
|
// Construct and append a PUNPCKHQDQ instruction to the active function.
|
|
func (c *Context) PUNPCKHQDQ(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKHQDQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHQDQ xmm xmm
|
|
// PUNPCKHQDQ m128 xmm
|
|
// Construct and append a PUNPCKHQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHQDQ(mx, x operand.Op) { ctx.PUNPCKHQDQ(mx, x) }
|
|
|
|
// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHWL xmm xmm
|
|
// PUNPCKHWL m128 xmm
|
|
// Construct and append a PUNPCKHWL instruction to the active function.
|
|
func (c *Context) PUNPCKHWL(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKHWL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHWL xmm xmm
|
|
// PUNPCKHWL m128 xmm
|
|
// Construct and append a PUNPCKHWL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHWL(mx, x operand.Op) { ctx.PUNPCKHWL(mx, x) }
|
|
|
|
// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLBW xmm xmm
|
|
// PUNPCKLBW m128 xmm
|
|
// Construct and append a PUNPCKLBW instruction to the active function.
|
|
func (c *Context) PUNPCKLBW(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKLBW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLBW xmm xmm
|
|
// PUNPCKLBW m128 xmm
|
|
// Construct and append a PUNPCKLBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLBW(mx, x operand.Op) { ctx.PUNPCKLBW(mx, x) }
|
|
|
|
// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLLQ xmm xmm
|
|
// PUNPCKLLQ m128 xmm
|
|
// Construct and append a PUNPCKLLQ instruction to the active function.
|
|
func (c *Context) PUNPCKLLQ(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKLLQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLLQ xmm xmm
|
|
// PUNPCKLLQ m128 xmm
|
|
// Construct and append a PUNPCKLLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLLQ(mx, x operand.Op) { ctx.PUNPCKLLQ(mx, x) }
|
|
|
|
// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLQDQ xmm xmm
|
|
// PUNPCKLQDQ m128 xmm
|
|
// Construct and append a PUNPCKLQDQ instruction to the active function.
|
|
func (c *Context) PUNPCKLQDQ(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKLQDQ(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLQDQ xmm xmm
|
|
// PUNPCKLQDQ m128 xmm
|
|
// Construct and append a PUNPCKLQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLQDQ(mx, x operand.Op) { ctx.PUNPCKLQDQ(mx, x) }
|
|
|
|
// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLWL xmm xmm
|
|
// PUNPCKLWL m128 xmm
|
|
// Construct and append a PUNPCKLWL instruction to the active function.
|
|
func (c *Context) PUNPCKLWL(mx, x operand.Op) {
|
|
if inst, err := x86.PUNPCKLWL(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLWL xmm xmm
|
|
// PUNPCKLWL m128 xmm
|
|
// Construct and append a PUNPCKLWL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLWL(mx, x operand.Op) { ctx.PUNPCKLWL(mx, x) }
|
|
|
|
// PUSHQ: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHQ imm8
|
|
// PUSHQ imm32
|
|
// PUSHQ r64
|
|
// PUSHQ m64
|
|
// Construct and append a PUSHQ instruction to the active function.
|
|
func (c *Context) PUSHQ(imr operand.Op) {
|
|
if inst, err := x86.PUSHQ(imr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUSHQ: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHQ imm8
|
|
// PUSHQ imm32
|
|
// PUSHQ r64
|
|
// PUSHQ m64
|
|
// Construct and append a PUSHQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUSHQ(imr operand.Op) { ctx.PUSHQ(imr) }
|
|
|
|
// PUSHW: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHW r16
|
|
// PUSHW m16
|
|
// Construct and append a PUSHW instruction to the active function.
|
|
func (c *Context) PUSHW(mr operand.Op) {
|
|
if inst, err := x86.PUSHW(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PUSHW: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHW r16
|
|
// PUSHW m16
|
|
// Construct and append a PUSHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUSHW(mr operand.Op) { ctx.PUSHW(mr) }
|
|
|
|
// PXOR: Packed Bitwise Logical Exclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PXOR xmm xmm
|
|
// PXOR m128 xmm
|
|
// Construct and append a PXOR instruction to the active function.
|
|
func (c *Context) PXOR(mx, x operand.Op) {
|
|
if inst, err := x86.PXOR(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// PXOR: Packed Bitwise Logical Exclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PXOR xmm xmm
|
|
// PXOR m128 xmm
|
|
// Construct and append a PXOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func PXOR(mx, x operand.Op) { ctx.PXOR(mx, x) }
|
|
|
|
// RCLB: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLB 1 r8
|
|
// RCLB imm8 r8
|
|
// RCLB cl r8
|
|
// RCLB 1 m8
|
|
// RCLB imm8 m8
|
|
// RCLB cl m8
|
|
// Construct and append a RCLB instruction to the active function.
|
|
func (c *Context) RCLB(ci, mr operand.Op) {
|
|
if inst, err := x86.RCLB(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCLB: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLB 1 r8
|
|
// RCLB imm8 r8
|
|
// RCLB cl r8
|
|
// RCLB 1 m8
|
|
// RCLB imm8 m8
|
|
// RCLB cl m8
|
|
// Construct and append a RCLB instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLB(ci, mr operand.Op) { ctx.RCLB(ci, mr) }
|
|
|
|
// RCLL: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLL 1 r32
|
|
// RCLL imm8 r32
|
|
// RCLL cl r32
|
|
// RCLL 1 m32
|
|
// RCLL imm8 m32
|
|
// RCLL cl m32
|
|
// Construct and append a RCLL instruction to the active function.
|
|
func (c *Context) RCLL(ci, mr operand.Op) {
|
|
if inst, err := x86.RCLL(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCLL: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLL 1 r32
|
|
// RCLL imm8 r32
|
|
// RCLL cl r32
|
|
// RCLL 1 m32
|
|
// RCLL imm8 m32
|
|
// RCLL cl m32
|
|
// Construct and append a RCLL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLL(ci, mr operand.Op) { ctx.RCLL(ci, mr) }
|
|
|
|
// RCLQ: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLQ 1 r64
|
|
// RCLQ imm8 r64
|
|
// RCLQ cl r64
|
|
// RCLQ 1 m64
|
|
// RCLQ imm8 m64
|
|
// RCLQ cl m64
|
|
// Construct and append a RCLQ instruction to the active function.
|
|
func (c *Context) RCLQ(ci, mr operand.Op) {
|
|
if inst, err := x86.RCLQ(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCLQ: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLQ 1 r64
|
|
// RCLQ imm8 r64
|
|
// RCLQ cl r64
|
|
// RCLQ 1 m64
|
|
// RCLQ imm8 m64
|
|
// RCLQ cl m64
|
|
// Construct and append a RCLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLQ(ci, mr operand.Op) { ctx.RCLQ(ci, mr) }
|
|
|
|
// RCLW: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLW 1 r16
|
|
// RCLW imm8 r16
|
|
// RCLW cl r16
|
|
// RCLW 1 m16
|
|
// RCLW imm8 m16
|
|
// RCLW cl m16
|
|
// Construct and append a RCLW instruction to the active function.
|
|
func (c *Context) RCLW(ci, mr operand.Op) {
|
|
if inst, err := x86.RCLW(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCLW: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLW 1 r16
|
|
// RCLW imm8 r16
|
|
// RCLW cl r16
|
|
// RCLW 1 m16
|
|
// RCLW imm8 m16
|
|
// RCLW cl m16
|
|
// Construct and append a RCLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLW(ci, mr operand.Op) { ctx.RCLW(ci, mr) }
|
|
|
|
// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPPS xmm xmm
|
|
// RCPPS m128 xmm
|
|
// Construct and append a RCPPS instruction to the active function.
|
|
func (c *Context) RCPPS(mx, x operand.Op) {
|
|
if inst, err := x86.RCPPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPPS xmm xmm
|
|
// RCPPS m128 xmm
|
|
// Construct and append a RCPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCPPS(mx, x operand.Op) { ctx.RCPPS(mx, x) }
|
|
|
|
// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPSS xmm xmm
|
|
// RCPSS m32 xmm
|
|
// Construct and append a RCPSS instruction to the active function.
|
|
func (c *Context) RCPSS(mx, x operand.Op) {
|
|
if inst, err := x86.RCPSS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPSS xmm xmm
|
|
// RCPSS m32 xmm
|
|
// Construct and append a RCPSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCPSS(mx, x operand.Op) { ctx.RCPSS(mx, x) }
|
|
|
|
// RCRB: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRB 1 r8
|
|
// RCRB imm8 r8
|
|
// RCRB cl r8
|
|
// RCRB 1 m8
|
|
// RCRB imm8 m8
|
|
// RCRB cl m8
|
|
// Construct and append a RCRB instruction to the active function.
|
|
func (c *Context) RCRB(ci, mr operand.Op) {
|
|
if inst, err := x86.RCRB(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCRB: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRB 1 r8
|
|
// RCRB imm8 r8
|
|
// RCRB cl r8
|
|
// RCRB 1 m8
|
|
// RCRB imm8 m8
|
|
// RCRB cl m8
|
|
// Construct and append a RCRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRB(ci, mr operand.Op) { ctx.RCRB(ci, mr) }
|
|
|
|
// RCRL: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRL 1 r32
|
|
// RCRL imm8 r32
|
|
// RCRL cl r32
|
|
// RCRL 1 m32
|
|
// RCRL imm8 m32
|
|
// RCRL cl m32
|
|
// Construct and append a RCRL instruction to the active function.
|
|
func (c *Context) RCRL(ci, mr operand.Op) {
|
|
if inst, err := x86.RCRL(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCRL: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRL 1 r32
|
|
// RCRL imm8 r32
|
|
// RCRL cl r32
|
|
// RCRL 1 m32
|
|
// RCRL imm8 m32
|
|
// RCRL cl m32
|
|
// Construct and append a RCRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRL(ci, mr operand.Op) { ctx.RCRL(ci, mr) }
|
|
|
|
// RCRQ: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRQ 1 r64
|
|
// RCRQ imm8 r64
|
|
// RCRQ cl r64
|
|
// RCRQ 1 m64
|
|
// RCRQ imm8 m64
|
|
// RCRQ cl m64
|
|
// Construct and append a RCRQ instruction to the active function.
|
|
func (c *Context) RCRQ(ci, mr operand.Op) {
|
|
if inst, err := x86.RCRQ(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCRQ: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRQ 1 r64
|
|
// RCRQ imm8 r64
|
|
// RCRQ cl r64
|
|
// RCRQ 1 m64
|
|
// RCRQ imm8 m64
|
|
// RCRQ cl m64
|
|
// Construct and append a RCRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRQ(ci, mr operand.Op) { ctx.RCRQ(ci, mr) }
|
|
|
|
// RCRW: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRW 1 r16
|
|
// RCRW imm8 r16
|
|
// RCRW cl r16
|
|
// RCRW 1 m16
|
|
// RCRW imm8 m16
|
|
// RCRW cl m16
|
|
// Construct and append a RCRW instruction to the active function.
|
|
func (c *Context) RCRW(ci, mr operand.Op) {
|
|
if inst, err := x86.RCRW(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RCRW: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRW 1 r16
|
|
// RCRW imm8 r16
|
|
// RCRW cl r16
|
|
// RCRW 1 m16
|
|
// RCRW imm8 m16
|
|
// RCRW cl m16
|
|
// Construct and append a RCRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRW(ci, mr operand.Op) { ctx.RCRW(ci, mr) }
|
|
|
|
// RDRANDL: Read Random Number.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDRANDL r32
|
|
// Construct and append a RDRANDL instruction to the active function.
|
|
func (c *Context) RDRANDL(r operand.Op) {
|
|
if inst, err := x86.RDRANDL(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RDRANDL: Read Random Number.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDRANDL r32
|
|
// Construct and append a RDRANDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RDRANDL(r operand.Op) { ctx.RDRANDL(r) }
|
|
|
|
// RDRANDQ: Read Random Number.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDRANDQ r64
|
|
// Construct and append a RDRANDQ instruction to the active function.
|
|
func (c *Context) RDRANDQ(r operand.Op) {
|
|
if inst, err := x86.RDRANDQ(r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// RDRANDQ: Read Random Number.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDRANDQ r64
|
|
// Construct and append a RDRANDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RDRANDQ(r operand.Op) { ctx.RDRANDQ(r) }

// RDRANDW: Read Random Number.
//
// Forms:
//
// RDRANDW r16
// Construct and append a RDRANDW instruction to the active function.
func (c *Context) RDRANDW(r operand.Op) {
	if inst, err := x86.RDRANDW(r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RDRANDW: Read Random Number.
//
// Forms:
//
// RDRANDW r16
// Construct and append a RDRANDW instruction to the active function.
// Operates on the global context.
func RDRANDW(r operand.Op) { ctx.RDRANDW(r) }

// RDSEEDL: Read Random SEED.
//
// Forms:
//
// RDSEEDL r32
// Construct and append a RDSEEDL instruction to the active function.
func (c *Context) RDSEEDL(r operand.Op) {
	if inst, err := x86.RDSEEDL(r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RDSEEDL: Read Random SEED.
//
// Forms:
//
// RDSEEDL r32
// Construct and append a RDSEEDL instruction to the active function.
// Operates on the global context.
func RDSEEDL(r operand.Op) { ctx.RDSEEDL(r) }

// RDSEEDQ: Read Random SEED.
//
// Forms:
//
// RDSEEDQ r64
// Construct and append a RDSEEDQ instruction to the active function.
func (c *Context) RDSEEDQ(r operand.Op) {
	if inst, err := x86.RDSEEDQ(r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RDSEEDQ: Read Random SEED.
//
// Forms:
//
// RDSEEDQ r64
// Construct and append a RDSEEDQ instruction to the active function.
// Operates on the global context.
func RDSEEDQ(r operand.Op) { ctx.RDSEEDQ(r) }

// RDSEEDW: Read Random SEED.
//
// Forms:
//
// RDSEEDW r16
// Construct and append a RDSEEDW instruction to the active function.
func (c *Context) RDSEEDW(r operand.Op) {
	if inst, err := x86.RDSEEDW(r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RDSEEDW: Read Random SEED.
//
// Forms:
//
// RDSEEDW r16
// Construct and append a RDSEEDW instruction to the active function.
// Operates on the global context.
func RDSEEDW(r operand.Op) { ctx.RDSEEDW(r) }

// RDTSC: Read Time-Stamp Counter.
//
// Forms:
//
// RDTSC
// Construct and append a RDTSC instruction to the active function.
func (c *Context) RDTSC() {
	if inst, err := x86.RDTSC(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RDTSC: Read Time-Stamp Counter.
//
// Forms:
//
// RDTSC
// Construct and append a RDTSC instruction to the active function.
// Operates on the global context.
func RDTSC() { ctx.RDTSC() }
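
// Example (illustrative sketch, assumed symbol names): RDTSC leaves the
// timestamp split across EDX:EAX, so a build program typically recombines the
// halves before storing the result. reg refers to the avo reg package and Imm
// to the operand package, both assumed to be imported.
//
//	TEXT("Cycles", NOSPLIT, "func() uint64")
//	RDTSC()
//	SHLQ(Imm(32), reg.RDX)    // move the high half into bits 63:32
//	ORQ(reg.RDX, reg.RAX)     // combine with the low half in EAX
//	Store(reg.RAX, ReturnIndex(0))
//	RET()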

// RDTSCP: Read Time-Stamp Counter and Processor ID.
//
// Forms:
//
// RDTSCP
// Construct and append a RDTSCP instruction to the active function.
func (c *Context) RDTSCP() {
	if inst, err := x86.RDTSCP(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RDTSCP: Read Time-Stamp Counter and Processor ID.
//
// Forms:
//
// RDTSCP
// Construct and append a RDTSCP instruction to the active function.
// Operates on the global context.
func RDTSCP() { ctx.RDTSCP() }

// RET: Return from Procedure.
//
// Forms:
//
// RET
// Construct and append a RET instruction to the active function.
func (c *Context) RET() {
	if inst, err := x86.RET(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RET: Return from Procedure.
//
// Forms:
//
// RET
// Construct and append a RET instruction to the active function.
// Operates on the global context.
func RET() { ctx.RET() }

// RETFL: Return from Procedure.
//
// Forms:
//
// RETFL imm16
// Construct and append a RETFL instruction to the active function.
func (c *Context) RETFL(i operand.Op) {
	if inst, err := x86.RETFL(i); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RETFL: Return from Procedure.
//
// Forms:
//
// RETFL imm16
// Construct and append a RETFL instruction to the active function.
// Operates on the global context.
func RETFL(i operand.Op) { ctx.RETFL(i) }

// RETFQ: Return from Procedure.
//
// Forms:
//
// RETFQ imm16
// Construct and append a RETFQ instruction to the active function.
func (c *Context) RETFQ(i operand.Op) {
	if inst, err := x86.RETFQ(i); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RETFQ: Return from Procedure.
//
// Forms:
//
// RETFQ imm16
// Construct and append a RETFQ instruction to the active function.
// Operates on the global context.
func RETFQ(i operand.Op) { ctx.RETFQ(i) }

// RETFW: Return from Procedure.
//
// Forms:
//
// RETFW imm16
// Construct and append a RETFW instruction to the active function.
func (c *Context) RETFW(i operand.Op) {
	if inst, err := x86.RETFW(i); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RETFW: Return from Procedure.
//
// Forms:
//
// RETFW imm16
// Construct and append a RETFW instruction to the active function.
// Operates on the global context.
func RETFW(i operand.Op) { ctx.RETFW(i) }

// ROLB: Rotate Left.
//
// Forms:
//
// ROLB 1 r8
// ROLB imm8 r8
// ROLB cl r8
// ROLB 1 m8
// ROLB imm8 m8
// ROLB cl m8
// Construct and append a ROLB instruction to the active function.
func (c *Context) ROLB(ci, mr operand.Op) {
	if inst, err := x86.ROLB(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROLB: Rotate Left.
//
// Forms:
//
// ROLB 1 r8
// ROLB imm8 r8
// ROLB cl r8
// ROLB 1 m8
// ROLB imm8 m8
// ROLB cl m8
// Construct and append a ROLB instruction to the active function.
// Operates on the global context.
func ROLB(ci, mr operand.Op) { ctx.ROLB(ci, mr) }

// ROLL: Rotate Left.
//
// Forms:
//
// ROLL 1 r32
// ROLL imm8 r32
// ROLL cl r32
// ROLL 1 m32
// ROLL imm8 m32
// ROLL cl m32
// Construct and append a ROLL instruction to the active function.
func (c *Context) ROLL(ci, mr operand.Op) {
	if inst, err := x86.ROLL(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROLL: Rotate Left.
//
// Forms:
//
// ROLL 1 r32
// ROLL imm8 r32
// ROLL cl r32
// ROLL 1 m32
// ROLL imm8 m32
// ROLL cl m32
// Construct and append a ROLL instruction to the active function.
// Operates on the global context.
func ROLL(ci, mr operand.Op) { ctx.ROLL(ci, mr) }

// ROLQ: Rotate Left.
//
// Forms:
//
// ROLQ 1 r64
// ROLQ imm8 r64
// ROLQ cl r64
// ROLQ 1 m64
// ROLQ imm8 m64
// ROLQ cl m64
// Construct and append a ROLQ instruction to the active function.
func (c *Context) ROLQ(ci, mr operand.Op) {
	if inst, err := x86.ROLQ(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROLQ: Rotate Left.
//
// Forms:
//
// ROLQ 1 r64
// ROLQ imm8 r64
// ROLQ cl r64
// ROLQ 1 m64
// ROLQ imm8 m64
// ROLQ cl m64
// Construct and append a ROLQ instruction to the active function.
// Operates on the global context.
func ROLQ(ci, mr operand.Op) { ctx.ROLQ(ci, mr) }
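
// Example (illustrative sketch, assumed names): the variable-count forms of
// the rotate wrappers take the count in CL, so a build program moves the
// shift amount into RCX before issuing the rotate. reg is the avo reg
// package, assumed to be imported.
//
//	x, n := GP64(), GP64()
//	// ... load the value into x and the count into n ...
//	MOVQ(n, reg.RCX)
//	ROLQ(reg.CL, x) // rotate x left by n (mod 64) bits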

// ROLW: Rotate Left.
//
// Forms:
//
// ROLW 1 r16
// ROLW imm8 r16
// ROLW cl r16
// ROLW 1 m16
// ROLW imm8 m16
// ROLW cl m16
// Construct and append a ROLW instruction to the active function.
func (c *Context) ROLW(ci, mr operand.Op) {
	if inst, err := x86.ROLW(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROLW: Rotate Left.
//
// Forms:
//
// ROLW 1 r16
// ROLW imm8 r16
// ROLW cl r16
// ROLW 1 m16
// ROLW imm8 m16
// ROLW cl m16
// Construct and append a ROLW instruction to the active function.
// Operates on the global context.
func ROLW(ci, mr operand.Op) { ctx.ROLW(ci, mr) }

// RORB: Rotate Right.
//
// Forms:
//
// RORB 1 r8
// RORB imm8 r8
// RORB cl r8
// RORB 1 m8
// RORB imm8 m8
// RORB cl m8
// Construct and append a RORB instruction to the active function.
func (c *Context) RORB(ci, mr operand.Op) {
	if inst, err := x86.RORB(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RORB: Rotate Right.
//
// Forms:
//
// RORB 1 r8
// RORB imm8 r8
// RORB cl r8
// RORB 1 m8
// RORB imm8 m8
// RORB cl m8
// Construct and append a RORB instruction to the active function.
// Operates on the global context.
func RORB(ci, mr operand.Op) { ctx.RORB(ci, mr) }

// RORL: Rotate Right.
//
// Forms:
//
// RORL 1 r32
// RORL imm8 r32
// RORL cl r32
// RORL 1 m32
// RORL imm8 m32
// RORL cl m32
// Construct and append a RORL instruction to the active function.
func (c *Context) RORL(ci, mr operand.Op) {
	if inst, err := x86.RORL(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RORL: Rotate Right.
//
// Forms:
//
// RORL 1 r32
// RORL imm8 r32
// RORL cl r32
// RORL 1 m32
// RORL imm8 m32
// RORL cl m32
// Construct and append a RORL instruction to the active function.
// Operates on the global context.
func RORL(ci, mr operand.Op) { ctx.RORL(ci, mr) }

// RORQ: Rotate Right.
//
// Forms:
//
// RORQ 1 r64
// RORQ imm8 r64
// RORQ cl r64
// RORQ 1 m64
// RORQ imm8 m64
// RORQ cl m64
// Construct and append a RORQ instruction to the active function.
func (c *Context) RORQ(ci, mr operand.Op) {
	if inst, err := x86.RORQ(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RORQ: Rotate Right.
//
// Forms:
//
// RORQ 1 r64
// RORQ imm8 r64
// RORQ cl r64
// RORQ 1 m64
// RORQ imm8 m64
// RORQ cl m64
// Construct and append a RORQ instruction to the active function.
// Operates on the global context.
func RORQ(ci, mr operand.Op) { ctx.RORQ(ci, mr) }

// RORW: Rotate Right.
//
// Forms:
//
// RORW 1 r16
// RORW imm8 r16
// RORW cl r16
// RORW 1 m16
// RORW imm8 m16
// RORW cl m16
// Construct and append a RORW instruction to the active function.
func (c *Context) RORW(ci, mr operand.Op) {
	if inst, err := x86.RORW(ci, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RORW: Rotate Right.
//
// Forms:
//
// RORW 1 r16
// RORW imm8 r16
// RORW cl r16
// RORW 1 m16
// RORW imm8 m16
// RORW cl m16
// Construct and append a RORW instruction to the active function.
// Operates on the global context.
func RORW(ci, mr operand.Op) { ctx.RORW(ci, mr) }

// RORXL: Rotate Right Logical Without Affecting Flags.
//
// Forms:
//
// RORXL imm8 r32 r32
// RORXL imm8 m32 r32
// Construct and append a RORXL instruction to the active function.
func (c *Context) RORXL(i, mr, r operand.Op) {
	if inst, err := x86.RORXL(i, mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RORXL: Rotate Right Logical Without Affecting Flags.
//
// Forms:
//
// RORXL imm8 r32 r32
// RORXL imm8 m32 r32
// Construct and append a RORXL instruction to the active function.
// Operates on the global context.
func RORXL(i, mr, r operand.Op) { ctx.RORXL(i, mr, r) }

// RORXQ: Rotate Right Logical Without Affecting Flags.
//
// Forms:
//
// RORXQ imm8 r64 r64
// RORXQ imm8 m64 r64
// Construct and append a RORXQ instruction to the active function.
func (c *Context) RORXQ(i, mr, r operand.Op) {
	if inst, err := x86.RORXQ(i, mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RORXQ: Rotate Right Logical Without Affecting Flags.
//
// Forms:
//
// RORXQ imm8 r64 r64
// RORXQ imm8 m64 r64
// Construct and append a RORXQ instruction to the active function.
// Operates on the global context.
func RORXQ(i, mr, r operand.Op) { ctx.RORXQ(i, mr, r) }

// ROUNDPD: Round Packed Double Precision Floating-Point Values.
//
// Forms:
//
// ROUNDPD imm8 xmm xmm
// ROUNDPD imm8 m128 xmm
// Construct and append a ROUNDPD instruction to the active function.
func (c *Context) ROUNDPD(i, mx, x operand.Op) {
	if inst, err := x86.ROUNDPD(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROUNDPD: Round Packed Double Precision Floating-Point Values.
//
// Forms:
//
// ROUNDPD imm8 xmm xmm
// ROUNDPD imm8 m128 xmm
// Construct and append a ROUNDPD instruction to the active function.
// Operates on the global context.
func ROUNDPD(i, mx, x operand.Op) { ctx.ROUNDPD(i, mx, x) }

// ROUNDPS: Round Packed Single Precision Floating-Point Values.
//
// Forms:
//
// ROUNDPS imm8 xmm xmm
// ROUNDPS imm8 m128 xmm
// Construct and append a ROUNDPS instruction to the active function.
func (c *Context) ROUNDPS(i, mx, x operand.Op) {
	if inst, err := x86.ROUNDPS(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROUNDPS: Round Packed Single Precision Floating-Point Values.
//
// Forms:
//
// ROUNDPS imm8 xmm xmm
// ROUNDPS imm8 m128 xmm
// Construct and append a ROUNDPS instruction to the active function.
// Operates on the global context.
func ROUNDPS(i, mx, x operand.Op) { ctx.ROUNDPS(i, mx, x) }

// ROUNDSD: Round Scalar Double Precision Floating-Point Values.
//
// Forms:
//
// ROUNDSD imm8 xmm xmm
// ROUNDSD imm8 m64 xmm
// Construct and append a ROUNDSD instruction to the active function.
func (c *Context) ROUNDSD(i, mx, x operand.Op) {
	if inst, err := x86.ROUNDSD(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROUNDSD: Round Scalar Double Precision Floating-Point Values.
//
// Forms:
//
// ROUNDSD imm8 xmm xmm
// ROUNDSD imm8 m64 xmm
// Construct and append a ROUNDSD instruction to the active function.
// Operates on the global context.
func ROUNDSD(i, mx, x operand.Op) { ctx.ROUNDSD(i, mx, x) }
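
// Example (illustrative sketch): the immediate selects the SSE4.1 rounding
// control, e.g. 0 = nearest, 1 = down, 2 = up, 3 = toward zero when bit 2 is
// clear. Imm is the operand-package helper and XMM the virtual-register
// allocator, both assumed to be available via the usual dot imports.
//
//	x := XMM()
//	// ... load a double into x ...
//	ROUNDSD(Imm(3), x, x) // truncate the scalar double toward zero in place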

// ROUNDSS: Round Scalar Single Precision Floating-Point Values.
//
// Forms:
//
// ROUNDSS imm8 xmm xmm
// ROUNDSS imm8 m32 xmm
// Construct and append a ROUNDSS instruction to the active function.
func (c *Context) ROUNDSS(i, mx, x operand.Op) {
	if inst, err := x86.ROUNDSS(i, mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// ROUNDSS: Round Scalar Single Precision Floating-Point Values.
//
// Forms:
//
// ROUNDSS imm8 xmm xmm
// ROUNDSS imm8 m32 xmm
// Construct and append a ROUNDSS instruction to the active function.
// Operates on the global context.
func ROUNDSS(i, mx, x operand.Op) { ctx.ROUNDSS(i, mx, x) }

// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// RSQRTPS xmm xmm
// RSQRTPS m128 xmm
// Construct and append a RSQRTPS instruction to the active function.
func (c *Context) RSQRTPS(mx, x operand.Op) {
	if inst, err := x86.RSQRTPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// RSQRTPS xmm xmm
// RSQRTPS m128 xmm
// Construct and append a RSQRTPS instruction to the active function.
// Operates on the global context.
func RSQRTPS(mx, x operand.Op) { ctx.RSQRTPS(mx, x) }

// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// RSQRTSS xmm xmm
// RSQRTSS m32 xmm
// Construct and append a RSQRTSS instruction to the active function.
func (c *Context) RSQRTSS(mx, x operand.Op) {
	if inst, err := x86.RSQRTSS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// RSQRTSS xmm xmm
// RSQRTSS m32 xmm
// Construct and append a RSQRTSS instruction to the active function.
// Operates on the global context.
func RSQRTSS(mx, x operand.Op) { ctx.RSQRTSS(mx, x) }

// SALB: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALB 1 r8
|
|
// SALB imm8 r8
|
|
// SALB cl r8
|
|
// SALB 1 m8
|
|
// SALB imm8 m8
|
|
// SALB cl m8
|
|
// Construct and append a SALB instruction to the active function.
|
|
func (c *Context) SALB(ci, mr operand.Op) {
|
|
if inst, err := x86.SALB(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SALB: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALB 1 r8
|
|
// SALB imm8 r8
|
|
// SALB cl r8
|
|
// SALB 1 m8
|
|
// SALB imm8 m8
|
|
// SALB cl m8
|
|
// Construct and append a SALB instruction to the active function.
|
|
// Operates on the global context.
|
|
func SALB(ci, mr operand.Op) { ctx.SALB(ci, mr) }
|
|
|
|
// SALL: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALL 1 r32
|
|
// SALL imm8 r32
|
|
// SALL cl r32
|
|
// SALL 1 m32
|
|
// SALL imm8 m32
|
|
// SALL cl m32
|
|
// Construct and append a SALL instruction to the active function.
|
|
func (c *Context) SALL(ci, mr operand.Op) {
|
|
if inst, err := x86.SALL(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SALL: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALL 1 r32
|
|
// SALL imm8 r32
|
|
// SALL cl r32
|
|
// SALL 1 m32
|
|
// SALL imm8 m32
|
|
// SALL cl m32
|
|
// Construct and append a SALL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SALL(ci, mr operand.Op) { ctx.SALL(ci, mr) }
|
|
|
|
// SALQ: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALQ 1 r64
|
|
// SALQ imm8 r64
|
|
// SALQ cl r64
|
|
// SALQ 1 m64
|
|
// SALQ imm8 m64
|
|
// SALQ cl m64
|
|
// Construct and append a SALQ instruction to the active function.
|
|
func (c *Context) SALQ(ci, mr operand.Op) {
|
|
if inst, err := x86.SALQ(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SALQ: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALQ 1 r64
|
|
// SALQ imm8 r64
|
|
// SALQ cl r64
|
|
// SALQ 1 m64
|
|
// SALQ imm8 m64
|
|
// SALQ cl m64
|
|
// Construct and append a SALQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SALQ(ci, mr operand.Op) { ctx.SALQ(ci, mr) }
|
|
|
|
// SALW: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALW 1 r16
|
|
// SALW imm8 r16
|
|
// SALW cl r16
|
|
// SALW 1 m16
|
|
// SALW imm8 m16
|
|
// SALW cl m16
|
|
// Construct and append a SALW instruction to the active function.
|
|
func (c *Context) SALW(ci, mr operand.Op) {
|
|
if inst, err := x86.SALW(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SALW: Arithmetic Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SALW 1 r16
|
|
// SALW imm8 r16
|
|
// SALW cl r16
|
|
// SALW 1 m16
|
|
// SALW imm8 m16
|
|
// SALW cl m16
|
|
// Construct and append a SALW instruction to the active function.
|
|
// Operates on the global context.
|
|
func SALW(ci, mr operand.Op) { ctx.SALW(ci, mr) }
|
|
|
|
// SARB: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARB 1 r8
|
|
// SARB imm8 r8
|
|
// SARB cl r8
|
|
// SARB 1 m8
|
|
// SARB imm8 m8
|
|
// SARB cl m8
|
|
// Construct and append a SARB instruction to the active function.
|
|
func (c *Context) SARB(ci, mr operand.Op) {
|
|
if inst, err := x86.SARB(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SARB: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARB 1 r8
|
|
// SARB imm8 r8
|
|
// SARB cl r8
|
|
// SARB 1 m8
|
|
// SARB imm8 m8
|
|
// SARB cl m8
|
|
// Construct and append a SARB instruction to the active function.
|
|
// Operates on the global context.
|
|
func SARB(ci, mr operand.Op) { ctx.SARB(ci, mr) }
|
|
|
|
// SARL: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARL 1 r32
|
|
// SARL imm8 r32
|
|
// SARL cl r32
|
|
// SARL 1 m32
|
|
// SARL imm8 m32
|
|
// SARL cl m32
|
|
// Construct and append a SARL instruction to the active function.
|
|
func (c *Context) SARL(ci, mr operand.Op) {
|
|
if inst, err := x86.SARL(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SARL: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARL 1 r32
|
|
// SARL imm8 r32
|
|
// SARL cl r32
|
|
// SARL 1 m32
|
|
// SARL imm8 m32
|
|
// SARL cl m32
|
|
// Construct and append a SARL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SARL(ci, mr operand.Op) { ctx.SARL(ci, mr) }
|
|
|
|
// SARQ: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARQ 1 r64
|
|
// SARQ imm8 r64
|
|
// SARQ cl r64
|
|
// SARQ 1 m64
|
|
// SARQ imm8 m64
|
|
// SARQ cl m64
|
|
// Construct and append a SARQ instruction to the active function.
|
|
func (c *Context) SARQ(ci, mr operand.Op) {
|
|
if inst, err := x86.SARQ(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SARQ: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARQ 1 r64
|
|
// SARQ imm8 r64
|
|
// SARQ cl r64
|
|
// SARQ 1 m64
|
|
// SARQ imm8 m64
|
|
// SARQ cl m64
|
|
// Construct and append a SARQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SARQ(ci, mr operand.Op) { ctx.SARQ(ci, mr) }
|
|
|
|
// SARW: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARW 1 r16
|
|
// SARW imm8 r16
|
|
// SARW cl r16
|
|
// SARW 1 m16
|
|
// SARW imm8 m16
|
|
// SARW cl m16
|
|
// Construct and append a SARW instruction to the active function.
|
|
func (c *Context) SARW(ci, mr operand.Op) {
|
|
if inst, err := x86.SARW(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SARW: Arithmetic Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARW 1 r16
|
|
// SARW imm8 r16
|
|
// SARW cl r16
|
|
// SARW 1 m16
|
|
// SARW imm8 m16
|
|
// SARW cl m16
|
|
// Construct and append a SARW instruction to the active function.
|
|
// Operates on the global context.
|
|
func SARW(ci, mr operand.Op) { ctx.SARW(ci, mr) }
|
|
|
|
// SARXL: Arithmetic Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARXL r32 r32 r32
|
|
// SARXL r32 m32 r32
|
|
// Construct and append a SARXL instruction to the active function.
|
|
func (c *Context) SARXL(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.SARXL(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SARXL: Arithmetic Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARXL r32 r32 r32
|
|
// SARXL r32 m32 r32
|
|
// Construct and append a SARXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SARXL(r, mr, r1 operand.Op) { ctx.SARXL(r, mr, r1) }
|
|
|
|
// SARXQ: Arithmetic Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARXQ r64 r64 r64
|
|
// SARXQ r64 m64 r64
|
|
// Construct and append a SARXQ instruction to the active function.
|
|
func (c *Context) SARXQ(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.SARXQ(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SARXQ: Arithmetic Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SARXQ r64 r64 r64
|
|
// SARXQ r64 m64 r64
|
|
// Construct and append a SARXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SARXQ(r, mr, r1 operand.Op) { ctx.SARXQ(r, mr, r1) }
|
|
|
|
// SBBB: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBB imm8 al
|
|
// SBBB imm8 r8
|
|
// SBBB r8 r8
|
|
// SBBB m8 r8
|
|
// SBBB imm8 m8
|
|
// SBBB r8 m8
|
|
// Construct and append a SBBB instruction to the active function.
|
|
func (c *Context) SBBB(imr, amr operand.Op) {
|
|
if inst, err := x86.SBBB(imr, amr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SBBB: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBB imm8 al
|
|
// SBBB imm8 r8
|
|
// SBBB r8 r8
|
|
// SBBB m8 r8
|
|
// SBBB imm8 m8
|
|
// SBBB r8 m8
|
|
// Construct and append a SBBB instruction to the active function.
|
|
// Operates on the global context.
|
|
func SBBB(imr, amr operand.Op) { ctx.SBBB(imr, amr) }
|
|
|
|
// SBBL: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBL imm32 eax
|
|
// SBBL imm8 r32
|
|
// SBBL imm32 r32
|
|
// SBBL r32 r32
|
|
// SBBL m32 r32
|
|
// SBBL imm8 m32
|
|
// SBBL imm32 m32
|
|
// SBBL r32 m32
|
|
// Construct and append a SBBL instruction to the active function.
|
|
func (c *Context) SBBL(imr, emr operand.Op) {
|
|
if inst, err := x86.SBBL(imr, emr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SBBL: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBL imm32 eax
|
|
// SBBL imm8 r32
|
|
// SBBL imm32 r32
|
|
// SBBL r32 r32
|
|
// SBBL m32 r32
|
|
// SBBL imm8 m32
|
|
// SBBL imm32 m32
|
|
// SBBL r32 m32
|
|
// Construct and append a SBBL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SBBL(imr, emr operand.Op) { ctx.SBBL(imr, emr) }
|
|
|
|
// SBBQ: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBQ imm32 rax
|
|
// SBBQ imm8 r64
|
|
// SBBQ imm32 r64
|
|
// SBBQ r64 r64
|
|
// SBBQ m64 r64
|
|
// SBBQ imm8 m64
|
|
// SBBQ imm32 m64
|
|
// SBBQ r64 m64
|
|
// Construct and append a SBBQ instruction to the active function.
|
|
func (c *Context) SBBQ(imr, mr operand.Op) {
|
|
if inst, err := x86.SBBQ(imr, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SBBQ: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBQ imm32 rax
|
|
// SBBQ imm8 r64
|
|
// SBBQ imm32 r64
|
|
// SBBQ r64 r64
|
|
// SBBQ m64 r64
|
|
// SBBQ imm8 m64
|
|
// SBBQ imm32 m64
|
|
// SBBQ r64 m64
|
|
// Construct and append a SBBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SBBQ(imr, mr operand.Op) { ctx.SBBQ(imr, mr) }
|
|
|
|
// SBBW: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBW imm16 ax
|
|
// SBBW imm8 r16
|
|
// SBBW imm16 r16
|
|
// SBBW r16 r16
|
|
// SBBW m16 r16
|
|
// SBBW imm8 m16
|
|
// SBBW imm16 m16
|
|
// SBBW r16 m16
|
|
// Construct and append a SBBW instruction to the active function.
|
|
func (c *Context) SBBW(imr, amr operand.Op) {
|
|
if inst, err := x86.SBBW(imr, amr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SBBW: Subtract with Borrow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SBBW imm16 ax
|
|
// SBBW imm8 r16
|
|
// SBBW imm16 r16
|
|
// SBBW r16 r16
|
|
// SBBW m16 r16
|
|
// SBBW imm8 m16
|
|
// SBBW imm16 m16
|
|
// SBBW r16 m16
|
|
// Construct and append a SBBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func SBBW(imr, amr operand.Op) { ctx.SBBW(imr, amr) }
|
|
|
|
// SETCC: Set byte if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETCC r8
|
|
// SETCC m8
|
|
// Construct and append a SETCC instruction to the active function.
|
|
func (c *Context) SETCC(mr operand.Op) {
|
|
if inst, err := x86.SETCC(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETCC: Set byte if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETCC r8
|
|
// SETCC m8
|
|
// Construct and append a SETCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETCC(mr operand.Op) { ctx.SETCC(mr) }
|
|
|
|
// SETCS: Set byte if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETCS r8
|
|
// SETCS m8
|
|
// Construct and append a SETCS instruction to the active function.
|
|
func (c *Context) SETCS(mr operand.Op) {
|
|
if inst, err := x86.SETCS(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETCS: Set byte if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETCS r8
|
|
// SETCS m8
|
|
// Construct and append a SETCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETCS(mr operand.Op) { ctx.SETCS(mr) }
|
|
|
|
// SETEQ: Set byte if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETEQ r8
|
|
// SETEQ m8
|
|
// Construct and append a SETEQ instruction to the active function.
|
|
func (c *Context) SETEQ(mr operand.Op) {
|
|
if inst, err := x86.SETEQ(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETEQ: Set byte if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETEQ r8
|
|
// SETEQ m8
|
|
// Construct and append a SETEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETEQ(mr operand.Op) { ctx.SETEQ(mr) }
|
|
|
|
// SETGE: Set byte if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETGE r8
|
|
// SETGE m8
|
|
// Construct and append a SETGE instruction to the active function.
|
|
func (c *Context) SETGE(mr operand.Op) {
|
|
if inst, err := x86.SETGE(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETGE: Set byte if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETGE r8
|
|
// SETGE m8
|
|
// Construct and append a SETGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETGE(mr operand.Op) { ctx.SETGE(mr) }
|
|
|
|
// SETGT: Set byte if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETGT r8
|
|
// SETGT m8
|
|
// Construct and append a SETGT instruction to the active function.
|
|
func (c *Context) SETGT(mr operand.Op) {
|
|
if inst, err := x86.SETGT(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETGT: Set byte if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETGT r8
|
|
// SETGT m8
|
|
// Construct and append a SETGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETGT(mr operand.Op) { ctx.SETGT(mr) }
|
|
|
|
// SETHI: Set byte if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETHI r8
|
|
// SETHI m8
|
|
// Construct and append a SETHI instruction to the active function.
|
|
func (c *Context) SETHI(mr operand.Op) {
|
|
if inst, err := x86.SETHI(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETHI: Set byte if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETHI r8
|
|
// SETHI m8
|
|
// Construct and append a SETHI instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETHI(mr operand.Op) { ctx.SETHI(mr) }
|
|
|
|
// SETLE: Set byte if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETLE r8
|
|
// SETLE m8
|
|
// Construct and append a SETLE instruction to the active function.
|
|
func (c *Context) SETLE(mr operand.Op) {
|
|
if inst, err := x86.SETLE(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETLE: Set byte if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETLE r8
|
|
// SETLE m8
|
|
// Construct and append a SETLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETLE(mr operand.Op) { ctx.SETLE(mr) }
|
|
|
|
// SETLS: Set byte if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETLS r8
|
|
// SETLS m8
|
|
// Construct and append a SETLS instruction to the active function.
|
|
func (c *Context) SETLS(mr operand.Op) {
|
|
if inst, err := x86.SETLS(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETLS: Set byte if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETLS r8
|
|
// SETLS m8
|
|
// Construct and append a SETLS instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETLS(mr operand.Op) { ctx.SETLS(mr) }
|
|
|
|
// SETLT: Set byte if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETLT r8
|
|
// SETLT m8
|
|
// Construct and append a SETLT instruction to the active function.
|
|
func (c *Context) SETLT(mr operand.Op) {
|
|
if inst, err := x86.SETLT(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETLT: Set byte if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETLT r8
|
|
// SETLT m8
|
|
// Construct and append a SETLT instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETLT(mr operand.Op) { ctx.SETLT(mr) }
|
|
|
|
// SETMI: Set byte if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETMI r8
|
|
// SETMI m8
|
|
// Construct and append a SETMI instruction to the active function.
|
|
func (c *Context) SETMI(mr operand.Op) {
|
|
if inst, err := x86.SETMI(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETMI: Set byte if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETMI r8
|
|
// SETMI m8
|
|
// Construct and append a SETMI instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETMI(mr operand.Op) { ctx.SETMI(mr) }
|
|
|
|
// SETNE: Set byte if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETNE r8
|
|
// SETNE m8
|
|
// Construct and append a SETNE instruction to the active function.
|
|
func (c *Context) SETNE(mr operand.Op) {
|
|
if inst, err := x86.SETNE(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETNE: Set byte if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETNE r8
|
|
// SETNE m8
|
|
// Construct and append a SETNE instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETNE(mr operand.Op) { ctx.SETNE(mr) }
|
|
|
|
// SETOC: Set byte if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETOC r8
|
|
// SETOC m8
|
|
// Construct and append a SETOC instruction to the active function.
|
|
func (c *Context) SETOC(mr operand.Op) {
|
|
if inst, err := x86.SETOC(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETOC: Set byte if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETOC r8
|
|
// SETOC m8
|
|
// Construct and append a SETOC instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETOC(mr operand.Op) { ctx.SETOC(mr) }
|
|
|
|
// SETOS: Set byte if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETOS r8
|
|
// SETOS m8
|
|
// Construct and append a SETOS instruction to the active function.
|
|
func (c *Context) SETOS(mr operand.Op) {
|
|
if inst, err := x86.SETOS(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETOS: Set byte if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETOS r8
|
|
// SETOS m8
|
|
// Construct and append a SETOS instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETOS(mr operand.Op) { ctx.SETOS(mr) }
|
|
|
|
// SETPC: Set byte if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETPC r8
|
|
// SETPC m8
|
|
// Construct and append a SETPC instruction to the active function.
|
|
func (c *Context) SETPC(mr operand.Op) {
|
|
if inst, err := x86.SETPC(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETPC: Set byte if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETPC r8
|
|
// SETPC m8
|
|
// Construct and append a SETPC instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETPC(mr operand.Op) { ctx.SETPC(mr) }
|
|
|
|
// SETPL: Set byte if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETPL r8
|
|
// SETPL m8
|
|
// Construct and append a SETPL instruction to the active function.
|
|
func (c *Context) SETPL(mr operand.Op) {
|
|
if inst, err := x86.SETPL(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETPL: Set byte if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETPL r8
|
|
// SETPL m8
|
|
// Construct and append a SETPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETPL(mr operand.Op) { ctx.SETPL(mr) }
|
|
|
|
// SETPS: Set byte if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETPS r8
|
|
// SETPS m8
|
|
// Construct and append a SETPS instruction to the active function.
|
|
func (c *Context) SETPS(mr operand.Op) {
|
|
if inst, err := x86.SETPS(mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SETPS: Set byte if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// SETPS r8
|
|
// SETPS m8
|
|
// Construct and append a SETPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func SETPS(mr operand.Op) { ctx.SETPS(mr) }
|
|
|
|
// SFENCE: Store Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SFENCE
|
|
// Construct and append a SFENCE instruction to the active function.
|
|
func (c *Context) SFENCE() {
|
|
if inst, err := x86.SFENCE(); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SFENCE: Store Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SFENCE
|
|
// Construct and append a SFENCE instruction to the active function.
|
|
// Operates on the global context.
|
|
func SFENCE() { ctx.SFENCE() }
|
|
|
|
// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1MSG1 xmm xmm
|
|
// SHA1MSG1 m128 xmm
|
|
// Construct and append a SHA1MSG1 instruction to the active function.
|
|
func (c *Context) SHA1MSG1(mx, x operand.Op) {
|
|
if inst, err := x86.SHA1MSG1(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1MSG1 xmm xmm
|
|
// SHA1MSG1 m128 xmm
|
|
// Construct and append a SHA1MSG1 instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA1MSG1(mx, x operand.Op) { ctx.SHA1MSG1(mx, x) }
|
|
|
|
// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1MSG2 xmm xmm
|
|
// SHA1MSG2 m128 xmm
|
|
// Construct and append a SHA1MSG2 instruction to the active function.
|
|
func (c *Context) SHA1MSG2(mx, x operand.Op) {
|
|
if inst, err := x86.SHA1MSG2(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1MSG2 xmm xmm
|
|
// SHA1MSG2 m128 xmm
|
|
// Construct and append a SHA1MSG2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA1MSG2(mx, x operand.Op) { ctx.SHA1MSG2(mx, x) }
|
|
|
|
// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1NEXTE xmm xmm
|
|
// SHA1NEXTE m128 xmm
|
|
// Construct and append a SHA1NEXTE instruction to the active function.
|
|
func (c *Context) SHA1NEXTE(mx, x operand.Op) {
|
|
if inst, err := x86.SHA1NEXTE(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1NEXTE xmm xmm
|
|
// SHA1NEXTE m128 xmm
|
|
// Construct and append a SHA1NEXTE instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA1NEXTE(mx, x operand.Op) { ctx.SHA1NEXTE(mx, x) }
|
|
|
|
// SHA1RNDS4: Perform Four Rounds of SHA1 Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1RNDS4 imm2u xmm xmm
|
|
// SHA1RNDS4 imm2u m128 xmm
|
|
// Construct and append a SHA1RNDS4 instruction to the active function.
|
|
func (c *Context) SHA1RNDS4(i, mx, x operand.Op) {
|
|
if inst, err := x86.SHA1RNDS4(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA1RNDS4: Perform Four Rounds of SHA1 Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA1RNDS4 imm2u xmm xmm
|
|
// SHA1RNDS4 imm2u m128 xmm
|
|
// Construct and append a SHA1RNDS4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA1RNDS4(i, mx, x operand.Op) { ctx.SHA1RNDS4(i, mx, x) }
|
|
|
|
// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA256MSG1 xmm xmm
|
|
// SHA256MSG1 m128 xmm
|
|
// Construct and append a SHA256MSG1 instruction to the active function.
|
|
func (c *Context) SHA256MSG1(mx, x operand.Op) {
|
|
if inst, err := x86.SHA256MSG1(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA256MSG1 xmm xmm
|
|
// SHA256MSG1 m128 xmm
|
|
// Construct and append a SHA256MSG1 instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA256MSG1(mx, x operand.Op) { ctx.SHA256MSG1(mx, x) }
|
|
|
|
// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA256MSG2 xmm xmm
|
|
// SHA256MSG2 m128 xmm
|
|
// Construct and append a SHA256MSG2 instruction to the active function.
|
|
func (c *Context) SHA256MSG2(mx, x operand.Op) {
|
|
if inst, err := x86.SHA256MSG2(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA256MSG2 xmm xmm
|
|
// SHA256MSG2 m128 xmm
|
|
// Construct and append a SHA256MSG2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA256MSG2(mx, x operand.Op) { ctx.SHA256MSG2(mx, x) }
|
|
|
|
// SHA256RNDS2: Perform Two Rounds of SHA256 Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA256RNDS2 xmm0 xmm xmm
|
|
// SHA256RNDS2 xmm0 m128 xmm
|
|
// Construct and append a SHA256RNDS2 instruction to the active function.
|
|
func (c *Context) SHA256RNDS2(x, mx, x1 operand.Op) {
|
|
if inst, err := x86.SHA256RNDS2(x, mx, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHA256RNDS2: Perform Two Rounds of SHA256 Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHA256RNDS2 xmm0 xmm xmm
|
|
// SHA256RNDS2 xmm0 m128 xmm
|
|
// Construct and append a SHA256RNDS2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHA256RNDS2(x, mx, x1 operand.Op) { ctx.SHA256RNDS2(x, mx, x1) }
|
|
|
|
// SHLB: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLB 1 r8
|
|
// SHLB imm8 r8
|
|
// SHLB cl r8
|
|
// SHLB 1 m8
|
|
// SHLB imm8 m8
|
|
// SHLB cl m8
|
|
// Construct and append a SHLB instruction to the active function.
|
|
func (c *Context) SHLB(ci, mr operand.Op) {
|
|
if inst, err := x86.SHLB(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHLB: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLB 1 r8
|
|
// SHLB imm8 r8
|
|
// SHLB cl r8
|
|
// SHLB 1 m8
|
|
// SHLB imm8 m8
|
|
// SHLB cl m8
|
|
// Construct and append a SHLB instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHLB(ci, mr operand.Op) { ctx.SHLB(ci, mr) }
|
|
|
|
// SHLL: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLL 1 r32
|
|
// SHLL imm8 r32
|
|
// SHLL cl r32
|
|
// SHLL 1 m32
|
|
// SHLL imm8 m32
|
|
// SHLL cl m32
|
|
// SHLL imm8 r32 r32
|
|
// SHLL cl r32 r32
|
|
// SHLL imm8 r32 m32
|
|
// SHLL cl r32 m32
|
|
// Construct and append a SHLL instruction to the active function.
|
|
func (c *Context) SHLL(ops ...operand.Op) {
|
|
if inst, err := x86.SHLL(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHLL: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLL 1 r32
|
|
// SHLL imm8 r32
|
|
// SHLL cl r32
|
|
// SHLL 1 m32
|
|
// SHLL imm8 m32
|
|
// SHLL cl m32
|
|
// SHLL imm8 r32 r32
|
|
// SHLL cl r32 r32
|
|
// SHLL imm8 r32 m32
|
|
// SHLL cl r32 m32
|
|
// Construct and append a SHLL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHLL(ops ...operand.Op) { ctx.SHLL(ops...) }
|
|
|
|
// SHLQ: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLQ 1 r64
|
|
// SHLQ imm8 r64
|
|
// SHLQ cl r64
|
|
// SHLQ 1 m64
|
|
// SHLQ imm8 m64
|
|
// SHLQ cl m64
|
|
// SHLQ imm8 r64 r64
|
|
// SHLQ cl r64 r64
|
|
// SHLQ imm8 r64 m64
|
|
// SHLQ cl r64 m64
|
|
// Construct and append a SHLQ instruction to the active function.
|
|
func (c *Context) SHLQ(ops ...operand.Op) {
|
|
if inst, err := x86.SHLQ(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHLQ: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLQ 1 r64
|
|
// SHLQ imm8 r64
|
|
// SHLQ cl r64
|
|
// SHLQ 1 m64
|
|
// SHLQ imm8 m64
|
|
// SHLQ cl m64
|
|
// SHLQ imm8 r64 r64
|
|
// SHLQ cl r64 r64
|
|
// SHLQ imm8 r64 m64
|
|
// SHLQ cl r64 m64
|
|
// Construct and append a SHLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHLQ(ops ...operand.Op) { ctx.SHLQ(ops...) }
|
|
|
|
// SHLW: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLW 1 r16
|
|
// SHLW imm8 r16
|
|
// SHLW cl r16
|
|
// SHLW 1 m16
|
|
// SHLW imm8 m16
|
|
// SHLW cl m16
|
|
// SHLW imm8 r16 r16
|
|
// SHLW cl r16 r16
|
|
// SHLW imm8 r16 m16
|
|
// SHLW cl r16 m16
|
|
// Construct and append a SHLW instruction to the active function.
|
|
func (c *Context) SHLW(ops ...operand.Op) {
|
|
if inst, err := x86.SHLW(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHLW: Logical Shift Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLW 1 r16
|
|
// SHLW imm8 r16
|
|
// SHLW cl r16
|
|
// SHLW 1 m16
|
|
// SHLW imm8 m16
|
|
// SHLW cl m16
|
|
// SHLW imm8 r16 r16
|
|
// SHLW cl r16 r16
|
|
// SHLW imm8 r16 m16
|
|
// SHLW cl r16 m16
|
|
// Construct and append a SHLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHLW(ops ...operand.Op) { ctx.SHLW(ops...) }
|
|
|
|
// SHLXL: Logical Shift Left Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLXL r32 r32 r32
|
|
// SHLXL r32 m32 r32
|
|
// Construct and append a SHLXL instruction to the active function.
|
|
func (c *Context) SHLXL(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.SHLXL(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHLXL: Logical Shift Left Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLXL r32 r32 r32
|
|
// SHLXL r32 m32 r32
|
|
// Construct and append a SHLXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHLXL(r, mr, r1 operand.Op) { ctx.SHLXL(r, mr, r1) }
|
|
|
|
// SHLXQ: Logical Shift Left Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLXQ r64 r64 r64
|
|
// SHLXQ r64 m64 r64
|
|
// Construct and append a SHLXQ instruction to the active function.
|
|
func (c *Context) SHLXQ(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.SHLXQ(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHLXQ: Logical Shift Left Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHLXQ r64 r64 r64
|
|
// SHLXQ r64 m64 r64
|
|
// Construct and append a SHLXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHLXQ(r, mr, r1 operand.Op) { ctx.SHLXQ(r, mr, r1) }
|
|
|
|
// SHRB: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRB 1 r8
|
|
// SHRB imm8 r8
|
|
// SHRB cl r8
|
|
// SHRB 1 m8
|
|
// SHRB imm8 m8
|
|
// SHRB cl m8
|
|
// Construct and append a SHRB instruction to the active function.
|
|
func (c *Context) SHRB(ci, mr operand.Op) {
|
|
if inst, err := x86.SHRB(ci, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHRB: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRB 1 r8
|
|
// SHRB imm8 r8
|
|
// SHRB cl r8
|
|
// SHRB 1 m8
|
|
// SHRB imm8 m8
|
|
// SHRB cl m8
|
|
// Construct and append a SHRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHRB(ci, mr operand.Op) { ctx.SHRB(ci, mr) }
|
|
|
|
// SHRL: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRL 1 r32
|
|
// SHRL imm8 r32
|
|
// SHRL cl r32
|
|
// SHRL 1 m32
|
|
// SHRL imm8 m32
|
|
// SHRL cl m32
|
|
// SHRL imm8 r32 r32
|
|
// SHRL cl r32 r32
|
|
// SHRL imm8 r32 m32
|
|
// SHRL cl r32 m32
|
|
// Construct and append a SHRL instruction to the active function.
|
|
func (c *Context) SHRL(ops ...operand.Op) {
|
|
if inst, err := x86.SHRL(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHRL: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRL 1 r32
|
|
// SHRL imm8 r32
|
|
// SHRL cl r32
|
|
// SHRL 1 m32
|
|
// SHRL imm8 m32
|
|
// SHRL cl m32
|
|
// SHRL imm8 r32 r32
|
|
// SHRL cl r32 r32
|
|
// SHRL imm8 r32 m32
|
|
// SHRL cl r32 m32
|
|
// Construct and append a SHRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHRL(ops ...operand.Op) { ctx.SHRL(ops...) }
|
|
|
|
// SHRQ: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRQ 1 r64
|
|
// SHRQ imm8 r64
|
|
// SHRQ cl r64
|
|
// SHRQ 1 m64
|
|
// SHRQ imm8 m64
|
|
// SHRQ cl m64
|
|
// SHRQ imm8 r64 r64
|
|
// SHRQ cl r64 r64
|
|
// SHRQ imm8 r64 m64
|
|
// SHRQ cl r64 m64
|
|
// Construct and append a SHRQ instruction to the active function.
|
|
func (c *Context) SHRQ(ops ...operand.Op) {
|
|
if inst, err := x86.SHRQ(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHRQ: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRQ 1 r64
|
|
// SHRQ imm8 r64
|
|
// SHRQ cl r64
|
|
// SHRQ 1 m64
|
|
// SHRQ imm8 m64
|
|
// SHRQ cl m64
|
|
// SHRQ imm8 r64 r64
|
|
// SHRQ cl r64 r64
|
|
// SHRQ imm8 r64 m64
|
|
// SHRQ cl r64 m64
|
|
// Construct and append a SHRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHRQ(ops ...operand.Op) { ctx.SHRQ(ops...) }
|
|
|
|
// SHRW: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRW 1 r16
|
|
// SHRW imm8 r16
|
|
// SHRW cl r16
|
|
// SHRW 1 m16
|
|
// SHRW imm8 m16
|
|
// SHRW cl m16
|
|
// SHRW imm8 r16 r16
|
|
// SHRW cl r16 r16
|
|
// SHRW imm8 r16 m16
|
|
// SHRW cl r16 m16
|
|
// Construct and append a SHRW instruction to the active function.
|
|
func (c *Context) SHRW(ops ...operand.Op) {
|
|
if inst, err := x86.SHRW(ops...); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHRW: Logical Shift Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRW 1 r16
|
|
// SHRW imm8 r16
|
|
// SHRW cl r16
|
|
// SHRW 1 m16
|
|
// SHRW imm8 m16
|
|
// SHRW cl m16
|
|
// SHRW imm8 r16 r16
|
|
// SHRW cl r16 r16
|
|
// SHRW imm8 r16 m16
|
|
// SHRW cl r16 m16
|
|
// Construct and append a SHRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHRW(ops ...operand.Op) { ctx.SHRW(ops...) }
|
|
|
|
// SHRXL: Logical Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRXL r32 r32 r32
|
|
// SHRXL r32 m32 r32
|
|
// Construct and append a SHRXL instruction to the active function.
|
|
func (c *Context) SHRXL(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.SHRXL(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHRXL: Logical Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRXL r32 r32 r32
|
|
// SHRXL r32 m32 r32
|
|
// Construct and append a SHRXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHRXL(r, mr, r1 operand.Op) { ctx.SHRXL(r, mr, r1) }
|
|
|
|
// SHRXQ: Logical Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRXQ r64 r64 r64
|
|
// SHRXQ r64 m64 r64
|
|
// Construct and append a SHRXQ instruction to the active function.
|
|
func (c *Context) SHRXQ(r, mr, r1 operand.Op) {
|
|
if inst, err := x86.SHRXQ(r, mr, r1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHRXQ: Logical Shift Right Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHRXQ r64 r64 r64
|
|
// SHRXQ r64 m64 r64
|
|
// Construct and append a SHRXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHRXQ(r, mr, r1 operand.Op) { ctx.SHRXQ(r, mr, r1) }
|
|
|
|
// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHUFPD imm8 xmm xmm
|
|
// SHUFPD imm8 m128 xmm
|
|
// Construct and append a SHUFPD instruction to the active function.
|
|
func (c *Context) SHUFPD(i, mx, x operand.Op) {
|
|
if inst, err := x86.SHUFPD(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHUFPD imm8 xmm xmm
|
|
// SHUFPD imm8 m128 xmm
|
|
// Construct and append a SHUFPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHUFPD(i, mx, x operand.Op) { ctx.SHUFPD(i, mx, x) }
|
|
|
|
// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHUFPS imm8 xmm xmm
|
|
// SHUFPS imm8 m128 xmm
|
|
// Construct and append a SHUFPS instruction to the active function.
|
|
func (c *Context) SHUFPS(i, mx, x operand.Op) {
|
|
if inst, err := x86.SHUFPS(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SHUFPS imm8 xmm xmm
|
|
// SHUFPS imm8 m128 xmm
|
|
// Construct and append a SHUFPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func SHUFPS(i, mx, x operand.Op) { ctx.SHUFPS(i, mx, x) }
|
|
|
|
// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SQRTPD xmm xmm
|
|
// SQRTPD m128 xmm
|
|
// Construct and append a SQRTPD instruction to the active function.
|
|
func (c *Context) SQRTPD(mx, x operand.Op) {
|
|
if inst, err := x86.SQRTPD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SQRTPD xmm xmm
|
|
// SQRTPD m128 xmm
|
|
// Construct and append a SQRTPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func SQRTPD(mx, x operand.Op) { ctx.SQRTPD(mx, x) }
|
|
|
|
// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SQRTPS xmm xmm
|
|
// SQRTPS m128 xmm
|
|
// Construct and append a SQRTPS instruction to the active function.
|
|
func (c *Context) SQRTPS(mx, x operand.Op) {
|
|
if inst, err := x86.SQRTPS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SQRTPS xmm xmm
|
|
// SQRTPS m128 xmm
|
|
// Construct and append a SQRTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func SQRTPS(mx, x operand.Op) { ctx.SQRTPS(mx, x) }
|
|
|
|
// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SQRTSD xmm xmm
|
|
// SQRTSD m64 xmm
|
|
// Construct and append a SQRTSD instruction to the active function.
|
|
func (c *Context) SQRTSD(mx, x operand.Op) {
|
|
if inst, err := x86.SQRTSD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// SQRTSD xmm xmm
|
|
// SQRTSD m64 xmm
|
|
// Construct and append a SQRTSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func SQRTSD(mx, x operand.Op) { ctx.SQRTSD(mx, x) }

// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// SQRTSS xmm xmm
// SQRTSS m32 xmm
// Construct and append a SQRTSS instruction to the active function.
func (c *Context) SQRTSS(mx, x operand.Op) {
	if inst, err := x86.SQRTSS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// SQRTSS xmm xmm
// SQRTSS m32 xmm
// Construct and append a SQRTSS instruction to the active function.
// Operates on the global context.
func SQRTSS(mx, x operand.Op) { ctx.SQRTSS(mx, x) }

// STC: Set Carry Flag.
//
// Forms:
//
// STC
// Construct and append a STC instruction to the active function.
func (c *Context) STC() {
	if inst, err := x86.STC(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// STC: Set Carry Flag.
//
// Forms:
//
// STC
// Construct and append a STC instruction to the active function.
// Operates on the global context.
func STC() { ctx.STC() }

// STD: Set Direction Flag.
//
// Forms:
//
// STD
// Construct and append a STD instruction to the active function.
func (c *Context) STD() {
	if inst, err := x86.STD(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// STD: Set Direction Flag.
//
// Forms:
//
// STD
// Construct and append a STD instruction to the active function.
// Operates on the global context.
func STD() { ctx.STD() }

// STMXCSR: Store MXCSR Register State.
//
// Forms:
//
// STMXCSR m32
// Construct and append a STMXCSR instruction to the active function.
func (c *Context) STMXCSR(m operand.Op) {
	if inst, err := x86.STMXCSR(m); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// STMXCSR: Store MXCSR Register State.
//
// Forms:
//
// STMXCSR m32
// Construct and append a STMXCSR instruction to the active function.
// Operates on the global context.
func STMXCSR(m operand.Op) { ctx.STMXCSR(m) }

// SUBB: Subtract.
//
// Forms:
//
// SUBB imm8 al
// SUBB imm8 r8
// SUBB r8 r8
// SUBB m8 r8
// SUBB imm8 m8
// SUBB r8 m8
// Construct and append a SUBB instruction to the active function.
func (c *Context) SUBB(imr, amr operand.Op) {
	if inst, err := x86.SUBB(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBB: Subtract.
//
// Forms:
//
// SUBB imm8 al
// SUBB imm8 r8
// SUBB r8 r8
// SUBB m8 r8
// SUBB imm8 m8
// SUBB r8 m8
// Construct and append a SUBB instruction to the active function.
// Operates on the global context.
func SUBB(imr, amr operand.Op) { ctx.SUBB(imr, amr) }

// SUBL: Subtract.
//
// Forms:
//
// SUBL imm32 eax
// SUBL imm8 r32
// SUBL imm32 r32
// SUBL r32 r32
// SUBL m32 r32
// SUBL imm8 m32
// SUBL imm32 m32
// SUBL r32 m32
// Construct and append a SUBL instruction to the active function.
func (c *Context) SUBL(imr, emr operand.Op) {
	if inst, err := x86.SUBL(imr, emr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBL: Subtract.
//
// Forms:
//
// SUBL imm32 eax
// SUBL imm8 r32
// SUBL imm32 r32
// SUBL r32 r32
// SUBL m32 r32
// SUBL imm8 m32
// SUBL imm32 m32
// SUBL r32 m32
// Construct and append a SUBL instruction to the active function.
// Operates on the global context.
func SUBL(imr, emr operand.Op) { ctx.SUBL(imr, emr) }

// SUBPD: Subtract Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// SUBPD xmm xmm
// SUBPD m128 xmm
// Construct and append a SUBPD instruction to the active function.
func (c *Context) SUBPD(mx, x operand.Op) {
	if inst, err := x86.SUBPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBPD: Subtract Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// SUBPD xmm xmm
// SUBPD m128 xmm
// Construct and append a SUBPD instruction to the active function.
// Operates on the global context.
func SUBPD(mx, x operand.Op) { ctx.SUBPD(mx, x) }

// SUBPS: Subtract Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// SUBPS xmm xmm
// SUBPS m128 xmm
// Construct and append a SUBPS instruction to the active function.
func (c *Context) SUBPS(mx, x operand.Op) {
	if inst, err := x86.SUBPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBPS: Subtract Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// SUBPS xmm xmm
// SUBPS m128 xmm
// Construct and append a SUBPS instruction to the active function.
// Operates on the global context.
func SUBPS(mx, x operand.Op) { ctx.SUBPS(mx, x) }

// SUBQ: Subtract.
//
// Forms:
//
// SUBQ imm32 rax
// SUBQ imm8 r64
// SUBQ imm32 r64
// SUBQ r64 r64
// SUBQ m64 r64
// SUBQ imm8 m64
// SUBQ imm32 m64
// SUBQ r64 m64
// Construct and append a SUBQ instruction to the active function.
func (c *Context) SUBQ(imr, mr operand.Op) {
	if inst, err := x86.SUBQ(imr, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBQ: Subtract.
//
// Forms:
//
// SUBQ imm32 rax
// SUBQ imm8 r64
// SUBQ imm32 r64
// SUBQ r64 r64
// SUBQ m64 r64
// SUBQ imm8 m64
// SUBQ imm32 m64
// SUBQ r64 m64
// Construct and append a SUBQ instruction to the active function.
// Operates on the global context.
func SUBQ(imr, mr operand.Op) { ctx.SUBQ(imr, mr) }

// SUBSD: Subtract Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// SUBSD xmm xmm
// SUBSD m64 xmm
// Construct and append a SUBSD instruction to the active function.
func (c *Context) SUBSD(mx, x operand.Op) {
	if inst, err := x86.SUBSD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBSD: Subtract Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// SUBSD xmm xmm
// SUBSD m64 xmm
// Construct and append a SUBSD instruction to the active function.
// Operates on the global context.
func SUBSD(mx, x operand.Op) { ctx.SUBSD(mx, x) }

// SUBSS: Subtract Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// SUBSS xmm xmm
// SUBSS m32 xmm
// Construct and append a SUBSS instruction to the active function.
func (c *Context) SUBSS(mx, x operand.Op) {
	if inst, err := x86.SUBSS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBSS: Subtract Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// SUBSS xmm xmm
// SUBSS m32 xmm
// Construct and append a SUBSS instruction to the active function.
// Operates on the global context.
func SUBSS(mx, x operand.Op) { ctx.SUBSS(mx, x) }

// SUBW: Subtract.
//
// Forms:
//
// SUBW imm16 ax
// SUBW imm8 r16
// SUBW imm16 r16
// SUBW r16 r16
// SUBW m16 r16
// SUBW imm8 m16
// SUBW imm16 m16
// SUBW r16 m16
// Construct and append a SUBW instruction to the active function.
func (c *Context) SUBW(imr, amr operand.Op) {
	if inst, err := x86.SUBW(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SUBW: Subtract.
//
// Forms:
//
// SUBW imm16 ax
// SUBW imm8 r16
// SUBW imm16 r16
// SUBW r16 r16
// SUBW m16 r16
// SUBW imm8 m16
// SUBW imm16 m16
// SUBW r16 m16
// Construct and append a SUBW instruction to the active function.
// Operates on the global context.
func SUBW(imr, amr operand.Op) { ctx.SUBW(imr, amr) }

// SYSCALL: Fast System Call.
//
// Forms:
//
// SYSCALL
// Construct and append a SYSCALL instruction to the active function.
func (c *Context) SYSCALL() {
	if inst, err := x86.SYSCALL(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// SYSCALL: Fast System Call.
//
// Forms:
//
// SYSCALL
// Construct and append a SYSCALL instruction to the active function.
// Operates on the global context.
func SYSCALL() { ctx.SYSCALL() }
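
// Usage sketch (editorial note, not produced by avogen): SYSCALL itself takes
// no operands, so the generating program loads the syscall number and
// arguments into the fixed registers beforehand. A minimal sketch of a getpid
// wrapper, assuming linux/amd64 where SYS_getpid is 39 and the result comes
// back in RAX; Imm and RAX are from avo's operand and reg packages:
//
//	TEXT("Getpid", NOSPLIT, "func() int")
//	MOVQ(operand.Imm(39), reg.RAX) // SYS_getpid (assumed constant)
//	SYSCALL()
//	Store(reg.RAX, ReturnIndex(0))
//	RET()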

// TESTB: Logical Compare.
//
// Forms:
//
// TESTB imm8 al
// TESTB imm8 r8
// TESTB r8 r8
// TESTB imm8 m8
// TESTB r8 m8
// Construct and append a TESTB instruction to the active function.
func (c *Context) TESTB(ir, amr operand.Op) {
	if inst, err := x86.TESTB(ir, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TESTB: Logical Compare.
//
// Forms:
//
// TESTB imm8 al
// TESTB imm8 r8
// TESTB r8 r8
// TESTB imm8 m8
// TESTB r8 m8
// Construct and append a TESTB instruction to the active function.
// Operates on the global context.
func TESTB(ir, amr operand.Op) { ctx.TESTB(ir, amr) }

// TESTL: Logical Compare.
//
// Forms:
//
// TESTL imm32 eax
// TESTL imm32 r32
// TESTL r32 r32
// TESTL imm32 m32
// TESTL r32 m32
// Construct and append a TESTL instruction to the active function.
func (c *Context) TESTL(ir, emr operand.Op) {
	if inst, err := x86.TESTL(ir, emr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TESTL: Logical Compare.
//
// Forms:
//
// TESTL imm32 eax
// TESTL imm32 r32
// TESTL r32 r32
// TESTL imm32 m32
// TESTL r32 m32
// Construct and append a TESTL instruction to the active function.
// Operates on the global context.
func TESTL(ir, emr operand.Op) { ctx.TESTL(ir, emr) }

// TESTQ: Logical Compare.
//
// Forms:
//
// TESTQ imm32 rax
// TESTQ imm32 r64
// TESTQ r64 r64
// TESTQ imm32 m64
// TESTQ r64 m64
// Construct and append a TESTQ instruction to the active function.
func (c *Context) TESTQ(ir, mr operand.Op) {
	if inst, err := x86.TESTQ(ir, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TESTQ: Logical Compare.
//
// Forms:
//
// TESTQ imm32 rax
// TESTQ imm32 r64
// TESTQ r64 r64
// TESTQ imm32 m64
// TESTQ r64 m64
// Construct and append a TESTQ instruction to the active function.
// Operates on the global context.
func TESTQ(ir, mr operand.Op) { ctx.TESTQ(ir, mr) }

// TESTW: Logical Compare.
//
// Forms:
//
// TESTW imm16 ax
// TESTW imm16 r16
// TESTW r16 r16
// TESTW imm16 m16
// TESTW r16 m16
// Construct and append a TESTW instruction to the active function.
func (c *Context) TESTW(ir, amr operand.Op) {
	if inst, err := x86.TESTW(ir, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TESTW: Logical Compare.
//
// Forms:
//
// TESTW imm16 ax
// TESTW imm16 r16
// TESTW r16 r16
// TESTW imm16 m16
// TESTW r16 m16
// Construct and append a TESTW instruction to the active function.
// Operates on the global context.
func TESTW(ir, amr operand.Op) { ctx.TESTW(ir, amr) }

// TZCNTL: Count the Number of Trailing Zero Bits.
//
// Forms:
//
// TZCNTL r32 r32
// TZCNTL m32 r32
// Construct and append a TZCNTL instruction to the active function.
func (c *Context) TZCNTL(mr, r operand.Op) {
	if inst, err := x86.TZCNTL(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TZCNTL: Count the Number of Trailing Zero Bits.
//
// Forms:
//
// TZCNTL r32 r32
// TZCNTL m32 r32
// Construct and append a TZCNTL instruction to the active function.
// Operates on the global context.
func TZCNTL(mr, r operand.Op) { ctx.TZCNTL(mr, r) }

// TZCNTQ: Count the Number of Trailing Zero Bits.
//
// Forms:
//
// TZCNTQ r64 r64
// TZCNTQ m64 r64
// Construct and append a TZCNTQ instruction to the active function.
func (c *Context) TZCNTQ(mr, r operand.Op) {
	if inst, err := x86.TZCNTQ(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TZCNTQ: Count the Number of Trailing Zero Bits.
//
// Forms:
//
// TZCNTQ r64 r64
// TZCNTQ m64 r64
// Construct and append a TZCNTQ instruction to the active function.
// Operates on the global context.
func TZCNTQ(mr, r operand.Op) { ctx.TZCNTQ(mr, r) }

// TZCNTW: Count the Number of Trailing Zero Bits.
//
// Forms:
//
// TZCNTW r16 r16
// TZCNTW m16 r16
// Construct and append a TZCNTW instruction to the active function.
func (c *Context) TZCNTW(mr, r operand.Op) {
	if inst, err := x86.TZCNTW(mr, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// TZCNTW: Count the Number of Trailing Zero Bits.
//
// Forms:
//
// TZCNTW r16 r16
// TZCNTW m16 r16
// Construct and append a TZCNTW instruction to the active function.
// Operates on the global context.
func TZCNTW(mr, r operand.Op) { ctx.TZCNTW(mr, r) }

// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// UCOMISD xmm xmm
// UCOMISD m64 xmm
// Construct and append a UCOMISD instruction to the active function.
func (c *Context) UCOMISD(mx, x operand.Op) {
	if inst, err := x86.UCOMISD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// UCOMISD xmm xmm
// UCOMISD m64 xmm
// Construct and append a UCOMISD instruction to the active function.
// Operates on the global context.
func UCOMISD(mx, x operand.Op) { ctx.UCOMISD(mx, x) }

// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// UCOMISS xmm xmm
// UCOMISS m32 xmm
// Construct and append a UCOMISS instruction to the active function.
func (c *Context) UCOMISS(mx, x operand.Op) {
	if inst, err := x86.UCOMISS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// UCOMISS xmm xmm
// UCOMISS m32 xmm
// Construct and append a UCOMISS instruction to the active function.
// Operates on the global context.
func UCOMISS(mx, x operand.Op) { ctx.UCOMISS(mx, x) }

// UD2: Undefined Instruction.
//
// Forms:
//
// UD2
// Construct and append a UD2 instruction to the active function.
func (c *Context) UD2() {
	if inst, err := x86.UD2(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UD2: Undefined Instruction.
//
// Forms:
//
// UD2
// Construct and append a UD2 instruction to the active function.
// Operates on the global context.
func UD2() { ctx.UD2() }

// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKHPD xmm xmm
// UNPCKHPD m128 xmm
// Construct and append a UNPCKHPD instruction to the active function.
func (c *Context) UNPCKHPD(mx, x operand.Op) {
	if inst, err := x86.UNPCKHPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKHPD xmm xmm
// UNPCKHPD m128 xmm
// Construct and append a UNPCKHPD instruction to the active function.
// Operates on the global context.
func UNPCKHPD(mx, x operand.Op) { ctx.UNPCKHPD(mx, x) }

// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKHPS xmm xmm
// UNPCKHPS m128 xmm
// Construct and append a UNPCKHPS instruction to the active function.
func (c *Context) UNPCKHPS(mx, x operand.Op) {
	if inst, err := x86.UNPCKHPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKHPS xmm xmm
// UNPCKHPS m128 xmm
// Construct and append a UNPCKHPS instruction to the active function.
// Operates on the global context.
func UNPCKHPS(mx, x operand.Op) { ctx.UNPCKHPS(mx, x) }

// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKLPD xmm xmm
// UNPCKLPD m128 xmm
// Construct and append a UNPCKLPD instruction to the active function.
func (c *Context) UNPCKLPD(mx, x operand.Op) {
	if inst, err := x86.UNPCKLPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKLPD xmm xmm
// UNPCKLPD m128 xmm
// Construct and append a UNPCKLPD instruction to the active function.
// Operates on the global context.
func UNPCKLPD(mx, x operand.Op) { ctx.UNPCKLPD(mx, x) }

// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKLPS xmm xmm
// UNPCKLPS m128 xmm
// Construct and append a UNPCKLPS instruction to the active function.
func (c *Context) UNPCKLPS(mx, x operand.Op) {
	if inst, err := x86.UNPCKLPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// UNPCKLPS xmm xmm
// UNPCKLPS m128 xmm
// Construct and append a UNPCKLPS instruction to the active function.
// Operates on the global context.
func UNPCKLPS(mx, x operand.Op) { ctx.UNPCKLPS(mx, x) }
|
|
|
|
// VADDPD: Add Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDPD xmm xmm xmm
|
|
// VADDPD m128 xmm xmm
|
|
// VADDPD ymm ymm ymm
|
|
// VADDPD m256 ymm ymm
|
|
// Construct and append a VADDPD instruction to the active function.
|
|
func (c *Context) VADDPD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VADDPD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VADDPD: Add Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDPD xmm xmm xmm
|
|
// VADDPD m128 xmm xmm
|
|
// VADDPD ymm ymm ymm
|
|
// VADDPD m256 ymm ymm
|
|
// Construct and append a VADDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VADDPD(mxy, xy, xy1 operand.Op) { ctx.VADDPD(mxy, xy, xy1) }
|
|
|
|
// VADDPS: Add Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDPS xmm xmm xmm
|
|
// VADDPS m128 xmm xmm
|
|
// VADDPS ymm ymm ymm
|
|
// VADDPS m256 ymm ymm
|
|
// Construct and append a VADDPS instruction to the active function.
|
|
func (c *Context) VADDPS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VADDPS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VADDPS: Add Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDPS xmm xmm xmm
|
|
// VADDPS m128 xmm xmm
|
|
// VADDPS ymm ymm ymm
|
|
// VADDPS m256 ymm ymm
|
|
// Construct and append a VADDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VADDPS(mxy, xy, xy1 operand.Op) { ctx.VADDPS(mxy, xy, xy1) }
|
|
|
|
// VADDSD: Add Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSD xmm xmm xmm
|
|
// VADDSD m64 xmm xmm
|
|
// Construct and append a VADDSD instruction to the active function.
|
|
func (c *Context) VADDSD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VADDSD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VADDSD: Add Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSD xmm xmm xmm
|
|
// VADDSD m64 xmm xmm
|
|
// Construct and append a VADDSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VADDSD(mx, x, x1 operand.Op) { ctx.VADDSD(mx, x, x1) }
|
|
|
|
// VADDSS: Add Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSS xmm xmm xmm
|
|
// VADDSS m32 xmm xmm
|
|
// Construct and append a VADDSS instruction to the active function.
|
|
func (c *Context) VADDSS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VADDSS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VADDSS: Add Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSS xmm xmm xmm
|
|
// VADDSS m32 xmm xmm
|
|
// Construct and append a VADDSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VADDSS(mx, x, x1 operand.Op) { ctx.VADDSS(mx, x, x1) }
|
|
|
|
// VADDSUBPD: Packed Double-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSUBPD xmm xmm xmm
|
|
// VADDSUBPD m128 xmm xmm
|
|
// VADDSUBPD ymm ymm ymm
|
|
// VADDSUBPD m256 ymm ymm
|
|
// Construct and append a VADDSUBPD instruction to the active function.
|
|
func (c *Context) VADDSUBPD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VADDSUBPD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VADDSUBPD: Packed Double-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSUBPD xmm xmm xmm
|
|
// VADDSUBPD m128 xmm xmm
|
|
// VADDSUBPD ymm ymm ymm
|
|
// VADDSUBPD m256 ymm ymm
|
|
// Construct and append a VADDSUBPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VADDSUBPD(mxy, xy, xy1 operand.Op) { ctx.VADDSUBPD(mxy, xy, xy1) }
|
|
|
|
// VADDSUBPS: Packed Single-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSUBPS xmm xmm xmm
|
|
// VADDSUBPS m128 xmm xmm
|
|
// VADDSUBPS ymm ymm ymm
|
|
// VADDSUBPS m256 ymm ymm
|
|
// Construct and append a VADDSUBPS instruction to the active function.
|
|
func (c *Context) VADDSUBPS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VADDSUBPS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VADDSUBPS: Packed Single-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VADDSUBPS xmm xmm xmm
|
|
// VADDSUBPS m128 xmm xmm
|
|
// VADDSUBPS ymm ymm ymm
|
|
// VADDSUBPS m256 ymm ymm
|
|
// Construct and append a VADDSUBPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VADDSUBPS(mxy, xy, xy1 operand.Op) { ctx.VADDSUBPS(mxy, xy, xy1) }
|
|
|
|
// VAESDEC: Perform One Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESDEC xmm xmm xmm
|
|
// VAESDEC m128 xmm xmm
|
|
// Construct and append a VAESDEC instruction to the active function.
|
|
func (c *Context) VAESDEC(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VAESDEC(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VAESDEC: Perform One Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESDEC xmm xmm xmm
|
|
// VAESDEC m128 xmm xmm
|
|
// Construct and append a VAESDEC instruction to the active function.
|
|
// Operates on the global context.
|
|
func VAESDEC(mx, x, x1 operand.Op) { ctx.VAESDEC(mx, x, x1) }
|
|
|
|
// VAESDECLAST: Perform Last Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESDECLAST xmm xmm xmm
|
|
// VAESDECLAST m128 xmm xmm
|
|
// Construct and append a VAESDECLAST instruction to the active function.
|
|
func (c *Context) VAESDECLAST(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VAESDECLAST(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VAESDECLAST: Perform Last Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESDECLAST xmm xmm xmm
|
|
// VAESDECLAST m128 xmm xmm
|
|
// Construct and append a VAESDECLAST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VAESDECLAST(mx, x, x1 operand.Op) { ctx.VAESDECLAST(mx, x, x1) }
|
|
|
|
// VAESENC: Perform One Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESENC xmm xmm xmm
|
|
// VAESENC m128 xmm xmm
|
|
// Construct and append a VAESENC instruction to the active function.
|
|
func (c *Context) VAESENC(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VAESENC(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VAESENC: Perform One Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESENC xmm xmm xmm
|
|
// VAESENC m128 xmm xmm
|
|
// Construct and append a VAESENC instruction to the active function.
|
|
// Operates on the global context.
|
|
func VAESENC(mx, x, x1 operand.Op) { ctx.VAESENC(mx, x, x1) }
|
|
|
|
// VAESENCLAST: Perform Last Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESENCLAST xmm xmm xmm
|
|
// VAESENCLAST m128 xmm xmm
|
|
// Construct and append a VAESENCLAST instruction to the active function.
|
|
func (c *Context) VAESENCLAST(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VAESENCLAST(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VAESENCLAST: Perform Last Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESENCLAST xmm xmm xmm
|
|
// VAESENCLAST m128 xmm xmm
|
|
// Construct and append a VAESENCLAST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VAESENCLAST(mx, x, x1 operand.Op) { ctx.VAESENCLAST(mx, x, x1) }
|
|
|
|
// VAESIMC: Perform the AES InvMixColumn Transformation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESIMC xmm xmm
|
|
// VAESIMC m128 xmm
|
|
// Construct and append a VAESIMC instruction to the active function.
|
|
func (c *Context) VAESIMC(mx, x operand.Op) {
|
|
if inst, err := x86.VAESIMC(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VAESIMC: Perform the AES InvMixColumn Transformation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESIMC xmm xmm
|
|
// VAESIMC m128 xmm
|
|
// Construct and append a VAESIMC instruction to the active function.
|
|
// Operates on the global context.
|
|
func VAESIMC(mx, x operand.Op) { ctx.VAESIMC(mx, x) }
|
|
|
|
// VAESKEYGENASSIST: AES Round Key Generation Assist.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESKEYGENASSIST imm8 xmm xmm
|
|
// VAESKEYGENASSIST imm8 m128 xmm
|
|
// Construct and append a VAESKEYGENASSIST instruction to the active function.
|
|
func (c *Context) VAESKEYGENASSIST(i, mx, x operand.Op) {
|
|
if inst, err := x86.VAESKEYGENASSIST(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VAESKEYGENASSIST: AES Round Key Generation Assist.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VAESKEYGENASSIST imm8 xmm xmm
|
|
// VAESKEYGENASSIST imm8 m128 xmm
|
|
// Construct and append a VAESKEYGENASSIST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VAESKEYGENASSIST(i, mx, x operand.Op) { ctx.VAESKEYGENASSIST(i, mx, x) }
|
|
|
|
// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDNPD xmm xmm xmm
|
|
// VANDNPD m128 xmm xmm
|
|
// VANDNPD ymm ymm ymm
|
|
// VANDNPD m256 ymm ymm
|
|
// Construct and append a VANDNPD instruction to the active function.
|
|
func (c *Context) VANDNPD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VANDNPD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDNPD xmm xmm xmm
|
|
// VANDNPD m128 xmm xmm
|
|
// VANDNPD ymm ymm ymm
|
|
// VANDNPD m256 ymm ymm
|
|
// Construct and append a VANDNPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VANDNPD(mxy, xy, xy1 operand.Op) { ctx.VANDNPD(mxy, xy, xy1) }
|
|
|
|
// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDNPS xmm xmm xmm
|
|
// VANDNPS m128 xmm xmm
|
|
// VANDNPS ymm ymm ymm
|
|
// VANDNPS m256 ymm ymm
|
|
// Construct and append a VANDNPS instruction to the active function.
|
|
func (c *Context) VANDNPS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VANDNPS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDNPS xmm xmm xmm
|
|
// VANDNPS m128 xmm xmm
|
|
// VANDNPS ymm ymm ymm
|
|
// VANDNPS m256 ymm ymm
|
|
// Construct and append a VANDNPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VANDNPS(mxy, xy, xy1 operand.Op) { ctx.VANDNPS(mxy, xy, xy1) }
|
|
|
|
// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDPD xmm xmm xmm
|
|
// VANDPD m128 xmm xmm
|
|
// VANDPD ymm ymm ymm
|
|
// VANDPD m256 ymm ymm
|
|
// Construct and append a VANDPD instruction to the active function.
|
|
func (c *Context) VANDPD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VANDPD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDPD xmm xmm xmm
|
|
// VANDPD m128 xmm xmm
|
|
// VANDPD ymm ymm ymm
|
|
// VANDPD m256 ymm ymm
|
|
// Construct and append a VANDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VANDPD(mxy, xy, xy1 operand.Op) { ctx.VANDPD(mxy, xy, xy1) }
|
|
|
|
// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDPS xmm xmm xmm
|
|
// VANDPS m128 xmm xmm
|
|
// VANDPS ymm ymm ymm
|
|
// VANDPS m256 ymm ymm
|
|
// Construct and append a VANDPS instruction to the active function.
|
|
func (c *Context) VANDPS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VANDPS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VANDPS xmm xmm xmm
|
|
// VANDPS m128 xmm xmm
|
|
// VANDPS ymm ymm ymm
|
|
// VANDPS m256 ymm ymm
|
|
// Construct and append a VANDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VANDPS(mxy, xy, xy1 operand.Op) { ctx.VANDPS(mxy, xy, xy1) }
|
|
|
|
// VBLENDPD: Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDPD imm8 xmm xmm xmm
|
|
// VBLENDPD imm8 m128 xmm xmm
|
|
// VBLENDPD imm8 ymm ymm ymm
|
|
// VBLENDPD imm8 m256 ymm ymm
|
|
// Construct and append a VBLENDPD instruction to the active function.
|
|
func (c *Context) VBLENDPD(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VBLENDPD(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBLENDPD: Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDPD imm8 xmm xmm xmm
|
|
// VBLENDPD imm8 m128 xmm xmm
|
|
// VBLENDPD imm8 ymm ymm ymm
|
|
// VBLENDPD imm8 m256 ymm ymm
|
|
// Construct and append a VBLENDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBLENDPD(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPD(i, mxy, xy, xy1) }
|
|
|
|
// VBLENDPS: Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDPS imm8 xmm xmm xmm
|
|
// VBLENDPS imm8 m128 xmm xmm
|
|
// VBLENDPS imm8 ymm ymm ymm
|
|
// VBLENDPS imm8 m256 ymm ymm
|
|
// Construct and append a VBLENDPS instruction to the active function.
|
|
func (c *Context) VBLENDPS(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VBLENDPS(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBLENDPS: Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDPS imm8 xmm xmm xmm
|
|
// VBLENDPS imm8 m128 xmm xmm
|
|
// VBLENDPS imm8 ymm ymm ymm
|
|
// VBLENDPS imm8 m256 ymm ymm
|
|
// Construct and append a VBLENDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBLENDPS(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPS(i, mxy, xy, xy1) }
|
|
|
|
// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDVPD xmm xmm xmm xmm
|
|
// VBLENDVPD xmm m128 xmm xmm
|
|
// VBLENDVPD ymm ymm ymm ymm
|
|
// VBLENDVPD ymm m256 ymm ymm
|
|
// Construct and append a VBLENDVPD instruction to the active function.
|
|
func (c *Context) VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) {
|
|
if inst, err := x86.VBLENDVPD(xy, mxy, xy1, xy2); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDVPD xmm xmm xmm xmm
|
|
// VBLENDVPD xmm m128 xmm xmm
|
|
// VBLENDVPD ymm ymm ymm ymm
|
|
// VBLENDVPD ymm m256 ymm ymm
|
|
// Construct and append a VBLENDVPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) { ctx.VBLENDVPD(xy, mxy, xy1, xy2) }
|
|
|
|
// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDVPS xmm xmm xmm xmm
|
|
// VBLENDVPS xmm m128 xmm xmm
|
|
// VBLENDVPS ymm ymm ymm ymm
|
|
// VBLENDVPS ymm m256 ymm ymm
|
|
// Construct and append a VBLENDVPS instruction to the active function.
|
|
func (c *Context) VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) {
|
|
if inst, err := x86.VBLENDVPS(xy, mxy, xy1, xy2); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDVPS xmm xmm xmm xmm
|
|
// VBLENDVPS xmm m128 xmm xmm
|
|
// VBLENDVPS ymm ymm ymm ymm
|
|
// VBLENDVPS ymm m256 ymm ymm
|
|
// Construct and append a VBLENDVPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) { ctx.VBLENDVPS(xy, mxy, xy1, xy2) }
|
|
|
|
// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTF128 m128 ymm
|
|
// Construct and append a VBROADCASTF128 instruction to the active function.
|
|
func (c *Context) VBROADCASTF128(m, y operand.Op) {
|
|
if inst, err := x86.VBROADCASTF128(m, y); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTF128 m128 ymm
|
|
// Construct and append a VBROADCASTF128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBROADCASTF128(m, y operand.Op) { ctx.VBROADCASTF128(m, y) }
|
|
|
|
// VBROADCASTI128: Broadcast 128 Bits of Integer Data.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTI128 m128 ymm
|
|
// Construct and append a VBROADCASTI128 instruction to the active function.
|
|
func (c *Context) VBROADCASTI128(m, y operand.Op) {
|
|
if inst, err := x86.VBROADCASTI128(m, y); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBROADCASTI128: Broadcast 128 Bits of Integer Data.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTI128 m128 ymm
|
|
// Construct and append a VBROADCASTI128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBROADCASTI128(m, y operand.Op) { ctx.VBROADCASTI128(m, y) }
|
|
|
|
// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTSD xmm ymm
|
|
// VBROADCASTSD m64 ymm
|
|
// Construct and append a VBROADCASTSD instruction to the active function.
|
|
func (c *Context) VBROADCASTSD(mx, y operand.Op) {
|
|
if inst, err := x86.VBROADCASTSD(mx, y); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTSD xmm ymm
|
|
// VBROADCASTSD m64 ymm
|
|
// Construct and append a VBROADCASTSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBROADCASTSD(mx, y operand.Op) { ctx.VBROADCASTSD(mx, y) }
|
|
|
|
// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTSS xmm xmm
|
|
// VBROADCASTSS m32 xmm
|
|
// VBROADCASTSS xmm ymm
|
|
// VBROADCASTSS m32 ymm
|
|
// Construct and append a VBROADCASTSS instruction to the active function.
|
|
func (c *Context) VBROADCASTSS(mx, xy operand.Op) {
|
|
if inst, err := x86.VBROADCASTSS(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBROADCASTSS xmm xmm
|
|
// VBROADCASTSS m32 xmm
|
|
// VBROADCASTSS xmm ymm
|
|
// VBROADCASTSS m32 ymm
|
|
// Construct and append a VBROADCASTSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBROADCASTSS(mx, xy operand.Op) { ctx.VBROADCASTSS(mx, xy) }
|
|
|
|
// VCMPPD: Compare Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD imm8 xmm xmm xmm
|
|
// VCMPPD imm8 m128 xmm xmm
|
|
// VCMPPD imm8 ymm ymm ymm
|
|
// VCMPPD imm8 m256 ymm ymm
|
|
// Construct and append a VCMPPD instruction to the active function.
|
|
func (c *Context) VCMPPD(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VCMPPD(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCMPPD: Compare Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD imm8 xmm xmm xmm
|
|
// VCMPPD imm8 m128 xmm xmm
|
|
// VCMPPD imm8 ymm ymm ymm
|
|
// VCMPPD imm8 m256 ymm ymm
|
|
// Construct and append a VCMPPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPD(i, mxy, xy, xy1 operand.Op) { ctx.VCMPPD(i, mxy, xy, xy1) }
|
|
|
|
// VCMPPS: Compare Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS imm8 xmm xmm xmm
|
|
// VCMPPS imm8 m128 xmm xmm
|
|
// VCMPPS imm8 ymm ymm ymm
|
|
// VCMPPS imm8 m256 ymm ymm
|
|
// Construct and append a VCMPPS instruction to the active function.
|
|
func (c *Context) VCMPPS(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VCMPPS(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCMPPS: Compare Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS imm8 xmm xmm xmm
|
|
// VCMPPS imm8 m128 xmm xmm
|
|
// VCMPPS imm8 ymm ymm ymm
|
|
// VCMPPS imm8 m256 ymm ymm
|
|
// Construct and append a VCMPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPS(i, mxy, xy, xy1 operand.Op) { ctx.VCMPPS(i, mxy, xy, xy1) }
|
|
|
|
// VCMPSD: Compare Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSD imm8 xmm xmm xmm
|
|
// VCMPSD imm8 m64 xmm xmm
|
|
// Construct and append a VCMPSD instruction to the active function.
|
|
func (c *Context) VCMPSD(i, mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VCMPSD(i, mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCMPSD: Compare Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSD imm8 xmm xmm xmm
|
|
// VCMPSD imm8 m64 xmm xmm
|
|
// Construct and append a VCMPSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPSD(i, mx, x, x1 operand.Op) { ctx.VCMPSD(i, mx, x, x1) }
|
|
|
|
// VCMPSS: Compare Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSS imm8 xmm xmm xmm
|
|
// VCMPSS imm8 m32 xmm xmm
|
|
// Construct and append a VCMPSS instruction to the active function.
|
|
func (c *Context) VCMPSS(i, mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VCMPSS(i, mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCMPSS: Compare Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSS imm8 xmm xmm xmm
|
|
// VCMPSS imm8 m32 xmm xmm
|
|
// Construct and append a VCMPSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPSS(i, mx, x, x1 operand.Op) { ctx.VCMPSS(i, mx, x, x1) }
|
|
|
|
// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISD xmm xmm
|
|
// VCOMISD m64 xmm
|
|
// Construct and append a VCOMISD instruction to the active function.
|
|
func (c *Context) VCOMISD(mx, x operand.Op) {
|
|
if inst, err := x86.VCOMISD(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISD xmm xmm
|
|
// VCOMISD m64 xmm
|
|
// Construct and append a VCOMISD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMISD(mx, x operand.Op) { ctx.VCOMISD(mx, x) }
|
|
|
|
// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISS xmm xmm
|
|
// VCOMISS m32 xmm
|
|
// Construct and append a VCOMISS instruction to the active function.
|
|
func (c *Context) VCOMISS(mx, x operand.Op) {
|
|
if inst, err := x86.VCOMISS(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISS xmm xmm
|
|
// VCOMISS m32 xmm
|
|
// Construct and append a VCOMISS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMISS(mx, x operand.Op) { ctx.VCOMISS(mx, x) }
|
|
|
|
// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD xmm xmm
|
|
// VCVTDQ2PD m64 xmm
|
|
// VCVTDQ2PD xmm ymm
|
|
// VCVTDQ2PD m128 ymm
|
|
// Construct and append a VCVTDQ2PD instruction to the active function.
|
|
func (c *Context) VCVTDQ2PD(mx, xy operand.Op) {
|
|
if inst, err := x86.VCVTDQ2PD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD xmm xmm
|
|
// VCVTDQ2PD m64 xmm
|
|
// VCVTDQ2PD xmm ymm
|
|
// VCVTDQ2PD m128 ymm
|
|
// Construct and append a VCVTDQ2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PD(mx, xy operand.Op) { ctx.VCVTDQ2PD(mx, xy) }
|
|
|
|
// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS xmm xmm
|
|
// VCVTDQ2PS m128 xmm
|
|
// VCVTDQ2PS ymm ymm
|
|
// VCVTDQ2PS m256 ymm
|
|
// Construct and append a VCVTDQ2PS instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS(mxy, xy operand.Op) {
|
|
if inst, err := x86.VCVTDQ2PS(mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS xmm xmm
|
|
// VCVTDQ2PS m128 xmm
|
|
// VCVTDQ2PS ymm ymm
|
|
// VCVTDQ2PS m256 ymm
|
|
// Construct and append a VCVTDQ2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS(mxy, xy operand.Op) { ctx.VCVTDQ2PS(mxy, xy) }
|
|
|
|
// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX xmm xmm
|
|
// VCVTPD2DQX m128 xmm
|
|
// Construct and append a VCVTPD2DQX instruction to the active function.
|
|
func (c *Context) VCVTPD2DQX(mx, x operand.Op) {
|
|
if inst, err := x86.VCVTPD2DQX(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX xmm xmm
|
|
// VCVTPD2DQX m128 xmm
|
|
// Construct and append a VCVTPD2DQX instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQX(mx, x operand.Op) { ctx.VCVTPD2DQX(mx, x) }
|
|
|
|
// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY ymm xmm
|
|
// VCVTPD2DQY m256 xmm
|
|
// Construct and append a VCVTPD2DQY instruction to the active function.
|
|
func (c *Context) VCVTPD2DQY(my, x operand.Op) {
|
|
if inst, err := x86.VCVTPD2DQY(my, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY ymm xmm
|
|
// VCVTPD2DQY m256 xmm
|
|
// Construct and append a VCVTPD2DQY instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQY(my, x operand.Op) { ctx.VCVTPD2DQY(my, x) }
|
|
|
|
// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX xmm xmm
|
|
// VCVTPD2PSX m128 xmm
|
|
// Construct and append a VCVTPD2PSX instruction to the active function.
|
|
func (c *Context) VCVTPD2PSX(mx, x operand.Op) {
|
|
if inst, err := x86.VCVTPD2PSX(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX xmm xmm
|
|
// VCVTPD2PSX m128 xmm
|
|
// Construct and append a VCVTPD2PSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSX(mx, x operand.Op) { ctx.VCVTPD2PSX(mx, x) }
|
|
|
|
// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY ymm xmm
|
|
// VCVTPD2PSY m256 xmm
|
|
// Construct and append a VCVTPD2PSY instruction to the active function.
|
|
func (c *Context) VCVTPD2PSY(my, x operand.Op) {
|
|
if inst, err := x86.VCVTPD2PSY(my, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY ymm xmm
|
|
// VCVTPD2PSY m256 xmm
|
|
// Construct and append a VCVTPD2PSY instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSY(my, x operand.Op) { ctx.VCVTPD2PSY(my, x) }
|
|
|
|
// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS xmm xmm
|
|
// VCVTPH2PS m64 xmm
|
|
// VCVTPH2PS xmm ymm
|
|
// VCVTPH2PS m128 ymm
|
|
// Construct and append a VCVTPH2PS instruction to the active function.
|
|
func (c *Context) VCVTPH2PS(mx, xy operand.Op) {
|
|
if inst, err := x86.VCVTPH2PS(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS xmm xmm
|
|
// VCVTPH2PS m64 xmm
|
|
// VCVTPH2PS xmm ymm
|
|
// VCVTPH2PS m128 ymm
|
|
// Construct and append a VCVTPH2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPH2PS(mx, xy operand.Op) { ctx.VCVTPH2PS(mx, xy) }
|
|
|
|
// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ xmm xmm
|
|
// VCVTPS2DQ m128 xmm
|
|
// VCVTPS2DQ ymm ymm
|
|
// VCVTPS2DQ m256 ymm
|
|
// Construct and append a VCVTPS2DQ instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ(mxy, xy operand.Op) {
|
|
if inst, err := x86.VCVTPS2DQ(mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ xmm xmm
|
|
// VCVTPS2DQ m128 xmm
|
|
// VCVTPS2DQ ymm ymm
|
|
// VCVTPS2DQ m256 ymm
|
|
// Construct and append a VCVTPS2DQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ(mxy, xy operand.Op) { ctx.VCVTPS2DQ(mxy, xy) }
|
|
|
|
// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD xmm xmm
|
|
// VCVTPS2PD m64 xmm
|
|
// VCVTPS2PD xmm ymm
|
|
// VCVTPS2PD m128 ymm
|
|
// Construct and append a VCVTPS2PD instruction to the active function.
|
|
func (c *Context) VCVTPS2PD(mx, xy operand.Op) {
|
|
if inst, err := x86.VCVTPS2PD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD xmm xmm
|
|
// VCVTPS2PD m64 xmm
|
|
// VCVTPS2PD xmm ymm
|
|
// VCVTPS2PD m128 ymm
|
|
// Construct and append a VCVTPS2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD(mx, xy operand.Op) { ctx.VCVTPS2PD(mx, xy) }
|
|
|
|
// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH imm8 xmm xmm
|
|
// VCVTPS2PH imm8 ymm xmm
|
|
// VCVTPS2PH imm8 xmm m64
|
|
// VCVTPS2PH imm8 ymm m128
|
|
// Construct and append a VCVTPS2PH instruction to the active function.
|
|
func (c *Context) VCVTPS2PH(i, xy, mx operand.Op) {
|
|
if inst, err := x86.VCVTPS2PH(i, xy, mx); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH imm8 xmm xmm
|
|
// VCVTPS2PH imm8 ymm xmm
|
|
// VCVTPS2PH imm8 xmm m64
|
|
// VCVTPS2PH imm8 ymm m128
|
|
// Construct and append a VCVTPS2PH instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PH(i, xy, mx operand.Op) { ctx.VCVTPS2PH(i, xy, mx) }
|
|
|
|
// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI xmm r32
|
|
// VCVTSD2SI m64 r32
|
|
// Construct and append a VCVTSD2SI instruction to the active function.
|
|
func (c *Context) VCVTSD2SI(mx, r operand.Op) {
|
|
if inst, err := x86.VCVTSD2SI(mx, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI xmm r32
|
|
// VCVTSD2SI m64 r32
|
|
// Construct and append a VCVTSD2SI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SI(mx, r operand.Op) { ctx.VCVTSD2SI(mx, r) }
|
|
|
|
// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ xmm r64
|
|
// VCVTSD2SIQ m64 r64
|
|
// Construct and append a VCVTSD2SIQ instruction to the active function.
|
|
func (c *Context) VCVTSD2SIQ(mx, r operand.Op) {
|
|
if inst, err := x86.VCVTSD2SIQ(mx, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ xmm r64
|
|
// VCVTSD2SIQ m64 r64
|
|
// Construct and append a VCVTSD2SIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SIQ(mx, r operand.Op) { ctx.VCVTSD2SIQ(mx, r) }
|
|
|
|
// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS xmm xmm xmm
|
|
// VCVTSD2SS m64 xmm xmm
|
|
// Construct and append a VCVTSD2SS instruction to the active function.
|
|
func (c *Context) VCVTSD2SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VCVTSD2SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS xmm xmm xmm
|
|
// VCVTSD2SS m64 xmm xmm
|
|
// Construct and append a VCVTSD2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS(mx, x, x1 operand.Op) { ctx.VCVTSD2SS(mx, x, x1) }
|
|
|
|
// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDL r32 xmm xmm
|
|
// VCVTSI2SDL m32 xmm xmm
|
|
// Construct and append a VCVTSI2SDL instruction to the active function.
|
|
func (c *Context) VCVTSI2SDL(mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VCVTSI2SDL(mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDL r32 xmm xmm
|
|
// VCVTSI2SDL m32 xmm xmm
|
|
// Construct and append a VCVTSI2SDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDL(mr, x, x1 operand.Op) { ctx.VCVTSI2SDL(mr, x, x1) }
|
|
|
|
// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ r64 xmm xmm
|
|
// VCVTSI2SDQ m64 xmm xmm
|
|
// Construct and append a VCVTSI2SDQ instruction to the active function.
|
|
func (c *Context) VCVTSI2SDQ(mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VCVTSI2SDQ(mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ r64 xmm xmm
|
|
// VCVTSI2SDQ m64 xmm xmm
|
|
// Construct and append a VCVTSI2SDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDQ(mr, x, x1 operand.Op) { ctx.VCVTSI2SDQ(mr, x, x1) }
|
|
|
|
// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL r32 xmm xmm
|
|
// VCVTSI2SSL m32 xmm xmm
|
|
// Construct and append a VCVTSI2SSL instruction to the active function.
|
|
func (c *Context) VCVTSI2SSL(mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VCVTSI2SSL(mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL r32 xmm xmm
|
|
// VCVTSI2SSL m32 xmm xmm
|
|
// Construct and append a VCVTSI2SSL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSL(mr, x, x1 operand.Op) { ctx.VCVTSI2SSL(mr, x, x1) }
|
|
|
|
// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value.
//
// Forms:
//
// VCVTSI2SSQ r64 xmm xmm
// VCVTSI2SSQ m64 xmm xmm
// Construct and append a VCVTSI2SSQ instruction to the active function.
func (c *Context) VCVTSI2SSQ(mr, x, x1 operand.Op) {
	if inst, err := x86.VCVTSI2SSQ(mr, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value.
//
// Forms:
//
// VCVTSI2SSQ r64 xmm xmm
// VCVTSI2SSQ m64 xmm xmm
// Construct and append a VCVTSI2SSQ instruction to the active function.
// Operates on the global context.
func VCVTSI2SSQ(mr, x, x1 operand.Op) { ctx.VCVTSI2SSQ(mr, x, x1) }

// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
//
// Forms:
//
// VCVTSS2SD xmm xmm xmm
// VCVTSS2SD m32 xmm xmm
// Construct and append a VCVTSS2SD instruction to the active function.
func (c *Context) VCVTSS2SD(mx, x, x1 operand.Op) {
	if inst, err := x86.VCVTSS2SD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
//
// Forms:
//
// VCVTSS2SD xmm xmm xmm
// VCVTSS2SD m32 xmm xmm
// Construct and append a VCVTSS2SD instruction to the active function.
// Operates on the global context.
func VCVTSS2SD(mx, x, x1 operand.Op) { ctx.VCVTSS2SD(mx, x, x1) }

// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTSS2SI xmm r32
// VCVTSS2SI m32 r32
// Construct and append a VCVTSS2SI instruction to the active function.
func (c *Context) VCVTSS2SI(mx, r operand.Op) {
	if inst, err := x86.VCVTSS2SI(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTSS2SI xmm r32
// VCVTSS2SI m32 r32
// Construct and append a VCVTSS2SI instruction to the active function.
// Operates on the global context.
func VCVTSS2SI(mx, r operand.Op) { ctx.VCVTSS2SI(mx, r) }

// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTSS2SIQ xmm r64
// VCVTSS2SIQ m32 r64
// Construct and append a VCVTSS2SIQ instruction to the active function.
func (c *Context) VCVTSS2SIQ(mx, r operand.Op) {
	if inst, err := x86.VCVTSS2SIQ(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTSS2SIQ xmm r64
// VCVTSS2SIQ m32 r64
// Construct and append a VCVTSS2SIQ instruction to the active function.
// Operates on the global context.
func VCVTSS2SIQ(mx, r operand.Op) { ctx.VCVTSS2SIQ(mx, r) }

// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
// VCVTTPD2DQX xmm xmm
// VCVTTPD2DQX m128 xmm
// Construct and append a VCVTTPD2DQX instruction to the active function.
func (c *Context) VCVTTPD2DQX(mx, x operand.Op) {
	if inst, err := x86.VCVTTPD2DQX(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
// VCVTTPD2DQX xmm xmm
// VCVTTPD2DQX m128 xmm
// Construct and append a VCVTTPD2DQX instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQX(mx, x operand.Op) { ctx.VCVTTPD2DQX(mx, x) }

// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
// VCVTTPD2DQY ymm xmm
// VCVTTPD2DQY m256 xmm
// Construct and append a VCVTTPD2DQY instruction to the active function.
func (c *Context) VCVTTPD2DQY(my, x operand.Op) {
	if inst, err := x86.VCVTTPD2DQY(my, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
// VCVTTPD2DQY ymm xmm
// VCVTTPD2DQY m256 xmm
// Construct and append a VCVTTPD2DQY instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQY(my, x operand.Op) { ctx.VCVTTPD2DQY(my, x) }

// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
// VCVTTPS2DQ xmm xmm
// VCVTTPS2DQ m128 xmm
// VCVTTPS2DQ ymm ymm
// VCVTTPS2DQ m256 ymm
// Construct and append a VCVTTPS2DQ instruction to the active function.
func (c *Context) VCVTTPS2DQ(mxy, xy operand.Op) {
	if inst, err := x86.VCVTTPS2DQ(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
// VCVTTPS2DQ xmm xmm
// VCVTTPS2DQ m128 xmm
// VCVTTPS2DQ ymm ymm
// VCVTTPS2DQ m256 ymm
// Construct and append a VCVTTPS2DQ instruction to the active function.
// Operates on the global context.
func VCVTTPS2DQ(mxy, xy operand.Op) { ctx.VCVTTPS2DQ(mxy, xy) }

// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
//
// Forms:
//
// VCVTTSD2SI xmm r32
// VCVTTSD2SI m64 r32
// Construct and append a VCVTTSD2SI instruction to the active function.
func (c *Context) VCVTTSD2SI(mx, r operand.Op) {
	if inst, err := x86.VCVTTSD2SI(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
//
// Forms:
//
// VCVTTSD2SI xmm r32
// VCVTTSD2SI m64 r32
// Construct and append a VCVTTSD2SI instruction to the active function.
// Operates on the global context.
func VCVTTSD2SI(mx, r operand.Op) { ctx.VCVTTSD2SI(mx, r) }

// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
//
// Forms:
//
// VCVTTSD2SIQ xmm r64
// VCVTTSD2SIQ m64 r64
// Construct and append a VCVTTSD2SIQ instruction to the active function.
func (c *Context) VCVTTSD2SIQ(mx, r operand.Op) {
	if inst, err := x86.VCVTTSD2SIQ(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
//
// Forms:
//
// VCVTTSD2SIQ xmm r64
// VCVTTSD2SIQ m64 r64
// Construct and append a VCVTTSD2SIQ instruction to the active function.
// Operates on the global context.
func VCVTTSD2SIQ(mx, r operand.Op) { ctx.VCVTTSD2SIQ(mx, r) }

// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTTSS2SI xmm r32
// VCVTTSS2SI m32 r32
// Construct and append a VCVTTSS2SI instruction to the active function.
func (c *Context) VCVTTSS2SI(mx, r operand.Op) {
	if inst, err := x86.VCVTTSS2SI(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTTSS2SI xmm r32
// VCVTTSS2SI m32 r32
// Construct and append a VCVTTSS2SI instruction to the active function.
// Operates on the global context.
func VCVTTSS2SI(mx, r operand.Op) { ctx.VCVTTSS2SI(mx, r) }

// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTTSS2SIQ xmm r64
// VCVTTSS2SIQ m32 r64
// Construct and append a VCVTTSS2SIQ instruction to the active function.
func (c *Context) VCVTTSS2SIQ(mx, r operand.Op) {
	if inst, err := x86.VCVTTSS2SIQ(mx, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
//
// Forms:
//
// VCVTTSS2SIQ xmm r64
// VCVTTSS2SIQ m32 r64
// Construct and append a VCVTTSS2SIQ instruction to the active function.
// Operates on the global context.
func VCVTTSS2SIQ(mx, r operand.Op) { ctx.VCVTTSS2SIQ(mx, r) }

// VDIVPD: Divide Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VDIVPD xmm xmm xmm
// VDIVPD m128 xmm xmm
// VDIVPD ymm ymm ymm
// VDIVPD m256 ymm ymm
// Construct and append a VDIVPD instruction to the active function.
func (c *Context) VDIVPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VDIVPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VDIVPD: Divide Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VDIVPD xmm xmm xmm
// VDIVPD m128 xmm xmm
// VDIVPD ymm ymm ymm
// VDIVPD m256 ymm ymm
// Construct and append a VDIVPD instruction to the active function.
// Operates on the global context.
func VDIVPD(mxy, xy, xy1 operand.Op) { ctx.VDIVPD(mxy, xy, xy1) }

// VDIVPS: Divide Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VDIVPS xmm xmm xmm
// VDIVPS m128 xmm xmm
// VDIVPS ymm ymm ymm
// VDIVPS m256 ymm ymm
// Construct and append a VDIVPS instruction to the active function.
func (c *Context) VDIVPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VDIVPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VDIVPS: Divide Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VDIVPS xmm xmm xmm
// VDIVPS m128 xmm xmm
// VDIVPS ymm ymm ymm
// VDIVPS m256 ymm ymm
// Construct and append a VDIVPS instruction to the active function.
// Operates on the global context.
func VDIVPS(mxy, xy, xy1 operand.Op) { ctx.VDIVPS(mxy, xy, xy1) }

// VDIVSD: Divide Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VDIVSD xmm xmm xmm
// VDIVSD m64 xmm xmm
// Construct and append a VDIVSD instruction to the active function.
func (c *Context) VDIVSD(mx, x, x1 operand.Op) {
	if inst, err := x86.VDIVSD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VDIVSD: Divide Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VDIVSD xmm xmm xmm
// VDIVSD m64 xmm xmm
// Construct and append a VDIVSD instruction to the active function.
// Operates on the global context.
func VDIVSD(mx, x, x1 operand.Op) { ctx.VDIVSD(mx, x, x1) }

// VDIVSS: Divide Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VDIVSS xmm xmm xmm
// VDIVSS m32 xmm xmm
// Construct and append a VDIVSS instruction to the active function.
func (c *Context) VDIVSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VDIVSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VDIVSS: Divide Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VDIVSS xmm xmm xmm
// VDIVSS m32 xmm xmm
// Construct and append a VDIVSS instruction to the active function.
// Operates on the global context.
func VDIVSS(mx, x, x1 operand.Op) { ctx.VDIVSS(mx, x, x1) }

// VDPPD: Dot Product of Packed Double Precision Floating-Point Values.
//
// Forms:
//
// VDPPD imm8 xmm xmm xmm
// VDPPD imm8 m128 xmm xmm
// Construct and append a VDPPD instruction to the active function.
func (c *Context) VDPPD(i, mx, x, x1 operand.Op) {
	if inst, err := x86.VDPPD(i, mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VDPPD: Dot Product of Packed Double Precision Floating-Point Values.
//
// Forms:
//
// VDPPD imm8 xmm xmm xmm
// VDPPD imm8 m128 xmm xmm
// Construct and append a VDPPD instruction to the active function.
// Operates on the global context.
func VDPPD(i, mx, x, x1 operand.Op) { ctx.VDPPD(i, mx, x, x1) }

// VDPPS: Dot Product of Packed Single Precision Floating-Point Values.
//
// Forms:
//
// VDPPS imm8 xmm xmm xmm
// VDPPS imm8 m128 xmm xmm
// VDPPS imm8 ymm ymm ymm
// VDPPS imm8 m256 ymm ymm
// Construct and append a VDPPS instruction to the active function.
func (c *Context) VDPPS(i, mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VDPPS(i, mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VDPPS: Dot Product of Packed Single Precision Floating-Point Values.
//
// Forms:
//
// VDPPS imm8 xmm xmm xmm
// VDPPS imm8 m128 xmm xmm
// VDPPS imm8 ymm ymm ymm
// VDPPS imm8 m256 ymm ymm
// Construct and append a VDPPS instruction to the active function.
// Operates on the global context.
func VDPPS(i, mxy, xy, xy1 operand.Op) { ctx.VDPPS(i, mxy, xy, xy1) }

// VEXTRACTF128: Extract Packed Floating-Point Values.
//
// Forms:
//
// VEXTRACTF128 imm8 ymm xmm
// VEXTRACTF128 imm8 ymm m128
// Construct and append a VEXTRACTF128 instruction to the active function.
func (c *Context) VEXTRACTF128(i, y, mx operand.Op) {
	if inst, err := x86.VEXTRACTF128(i, y, mx); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VEXTRACTF128: Extract Packed Floating-Point Values.
//
// Forms:
//
// VEXTRACTF128 imm8 ymm xmm
// VEXTRACTF128 imm8 ymm m128
// Construct and append a VEXTRACTF128 instruction to the active function.
// Operates on the global context.
func VEXTRACTF128(i, y, mx operand.Op) { ctx.VEXTRACTF128(i, y, mx) }

// VEXTRACTI128: Extract Packed Integer Values.
//
// Forms:
//
// VEXTRACTI128 imm8 ymm xmm
// VEXTRACTI128 imm8 ymm m128
// Construct and append a VEXTRACTI128 instruction to the active function.
func (c *Context) VEXTRACTI128(i, y, mx operand.Op) {
	if inst, err := x86.VEXTRACTI128(i, y, mx); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VEXTRACTI128: Extract Packed Integer Values.
//
// Forms:
//
// VEXTRACTI128 imm8 ymm xmm
// VEXTRACTI128 imm8 ymm m128
// Construct and append a VEXTRACTI128 instruction to the active function.
// Operates on the global context.
func VEXTRACTI128(i, y, mx operand.Op) { ctx.VEXTRACTI128(i, y, mx) }

// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value.
//
// Forms:
//
// VEXTRACTPS imm8 xmm r32
// VEXTRACTPS imm8 xmm m32
// Construct and append a VEXTRACTPS instruction to the active function.
func (c *Context) VEXTRACTPS(i, x, mr operand.Op) {
	if inst, err := x86.VEXTRACTPS(i, x, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value.
//
// Forms:
//
// VEXTRACTPS imm8 xmm r32
// VEXTRACTPS imm8 xmm m32
// Construct and append a VEXTRACTPS instruction to the active function.
// Operates on the global context.
func VEXTRACTPS(i, x, mr operand.Op) { ctx.VEXTRACTPS(i, x, mr) }

// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD xmm xmm xmm
|
|
// VFMADD132PD m128 xmm xmm
|
|
// VFMADD132PD ymm ymm ymm
|
|
// VFMADD132PD m256 ymm ymm
|
|
// Construct and append a VFMADD132PD instruction to the active function.
|
|
func (c *Context) VFMADD132PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADD132PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD xmm xmm xmm
|
|
// VFMADD132PD m128 xmm xmm
|
|
// VFMADD132PD ymm ymm ymm
|
|
// VFMADD132PD m256 ymm ymm
|
|
// Construct and append a VFMADD132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD(mxy, xy, xy1 operand.Op) { ctx.VFMADD132PD(mxy, xy, xy1) }
|
|
|
|
// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS xmm xmm xmm
|
|
// VFMADD132PS m128 xmm xmm
|
|
// VFMADD132PS ymm ymm ymm
|
|
// VFMADD132PS m256 ymm ymm
|
|
// Construct and append a VFMADD132PS instruction to the active function.
|
|
func (c *Context) VFMADD132PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADD132PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS xmm xmm xmm
|
|
// VFMADD132PS m128 xmm xmm
|
|
// VFMADD132PS ymm ymm ymm
|
|
// VFMADD132PS m256 ymm ymm
|
|
// Construct and append a VFMADD132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS(mxy, xy, xy1 operand.Op) { ctx.VFMADD132PS(mxy, xy, xy1) }
|
|
|
|
// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD xmm xmm xmm
|
|
// VFMADD132SD m64 xmm xmm
|
|
// Construct and append a VFMADD132SD instruction to the active function.
|
|
func (c *Context) VFMADD132SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMADD132SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD xmm xmm xmm
|
|
// VFMADD132SD m64 xmm xmm
|
|
// Construct and append a VFMADD132SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132SD(mx, x, x1 operand.Op) { ctx.VFMADD132SD(mx, x, x1) }
|
|
|
|
// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SS xmm xmm xmm
|
|
// VFMADD132SS m32 xmm xmm
|
|
// Construct and append a VFMADD132SS instruction to the active function.
|
|
func (c *Context) VFMADD132SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMADD132SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SS xmm xmm xmm
|
|
// VFMADD132SS m32 xmm xmm
|
|
// Construct and append a VFMADD132SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132SS(mx, x, x1 operand.Op) { ctx.VFMADD132SS(mx, x, x1) }
|
|
|
|
// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PD xmm xmm xmm
|
|
// VFMADD213PD m128 xmm xmm
|
|
// VFMADD213PD ymm ymm ymm
|
|
// VFMADD213PD m256 ymm ymm
|
|
// Construct and append a VFMADD213PD instruction to the active function.
|
|
func (c *Context) VFMADD213PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADD213PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PD xmm xmm xmm
|
|
// VFMADD213PD m128 xmm xmm
|
|
// VFMADD213PD ymm ymm ymm
|
|
// VFMADD213PD m256 ymm ymm
|
|
// Construct and append a VFMADD213PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PD(mxy, xy, xy1 operand.Op) { ctx.VFMADD213PD(mxy, xy, xy1) }
|
|
|
|
// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS xmm xmm xmm
|
|
// VFMADD213PS m128 xmm xmm
|
|
// VFMADD213PS ymm ymm ymm
|
|
// VFMADD213PS m256 ymm ymm
|
|
// Construct and append a VFMADD213PS instruction to the active function.
|
|
func (c *Context) VFMADD213PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADD213PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS xmm xmm xmm
|
|
// VFMADD213PS m128 xmm xmm
|
|
// VFMADD213PS ymm ymm ymm
|
|
// VFMADD213PS m256 ymm ymm
|
|
// Construct and append a VFMADD213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PS(mxy, xy, xy1 operand.Op) { ctx.VFMADD213PS(mxy, xy, xy1) }
|
|
|
|
// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD xmm xmm xmm
|
|
// VFMADD213SD m64 xmm xmm
|
|
// Construct and append a VFMADD213SD instruction to the active function.
|
|
func (c *Context) VFMADD213SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMADD213SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD xmm xmm xmm
|
|
// VFMADD213SD m64 xmm xmm
|
|
// Construct and append a VFMADD213SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD(mx, x, x1 operand.Op) { ctx.VFMADD213SD(mx, x, x1) }
|
|
|
|
// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS xmm xmm xmm
|
|
// VFMADD213SS m32 xmm xmm
|
|
// Construct and append a VFMADD213SS instruction to the active function.
|
|
func (c *Context) VFMADD213SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMADD213SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS xmm xmm xmm
|
|
// VFMADD213SS m32 xmm xmm
|
|
// Construct and append a VFMADD213SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS(mx, x, x1 operand.Op) { ctx.VFMADD213SS(mx, x, x1) }
|
|
|
|
// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD xmm xmm xmm
|
|
// VFMADD231PD m128 xmm xmm
|
|
// VFMADD231PD ymm ymm ymm
|
|
// VFMADD231PD m256 ymm ymm
|
|
// Construct and append a VFMADD231PD instruction to the active function.
|
|
func (c *Context) VFMADD231PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADD231PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD xmm xmm xmm
|
|
// VFMADD231PD m128 xmm xmm
|
|
// VFMADD231PD ymm ymm ymm
|
|
// VFMADD231PD m256 ymm ymm
|
|
// Construct and append a VFMADD231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD(mxy, xy, xy1 operand.Op) { ctx.VFMADD231PD(mxy, xy, xy1) }
|
|
|
|
// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PS xmm xmm xmm
|
|
// VFMADD231PS m128 xmm xmm
|
|
// VFMADD231PS ymm ymm ymm
|
|
// VFMADD231PS m256 ymm ymm
|
|
// Construct and append a VFMADD231PS instruction to the active function.
|
|
func (c *Context) VFMADD231PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADD231PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PS xmm xmm xmm
|
|
// VFMADD231PS m128 xmm xmm
|
|
// VFMADD231PS ymm ymm ymm
|
|
// VFMADD231PS m256 ymm ymm
|
|
// Construct and append a VFMADD231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PS(mxy, xy, xy1 operand.Op) { ctx.VFMADD231PS(mxy, xy, xy1) }
|
|
|
|
// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231SD xmm xmm xmm
|
|
// VFMADD231SD m64 xmm xmm
|
|
// Construct and append a VFMADD231SD instruction to the active function.
|
|
func (c *Context) VFMADD231SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMADD231SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231SD xmm xmm xmm
|
|
// VFMADD231SD m64 xmm xmm
|
|
// Construct and append a VFMADD231SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231SD(mx, x, x1 operand.Op) { ctx.VFMADD231SD(mx, x, x1) }
|
|
|
|
// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231SS xmm xmm xmm
|
|
// VFMADD231SS m32 xmm xmm
|
|
// Construct and append a VFMADD231SS instruction to the active function.
|
|
func (c *Context) VFMADD231SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMADD231SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231SS xmm xmm xmm
|
|
// VFMADD231SS m32 xmm xmm
|
|
// Construct and append a VFMADD231SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231SS(mx, x, x1 operand.Op) { ctx.VFMADD231SS(mx, x, x1) }
|
|
|
|
// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB132PD xmm xmm xmm
|
|
// VFMADDSUB132PD m128 xmm xmm
|
|
// VFMADDSUB132PD ymm ymm ymm
|
|
// VFMADDSUB132PD m256 ymm ymm
|
|
// Construct and append a VFMADDSUB132PD instruction to the active function.
|
|
func (c *Context) VFMADDSUB132PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADDSUB132PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB132PD xmm xmm xmm
|
|
// VFMADDSUB132PD m128 xmm xmm
|
|
// VFMADDSUB132PD ymm ymm ymm
|
|
// VFMADDSUB132PD m256 ymm ymm
|
|
// Construct and append a VFMADDSUB132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB132PD(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB132PD(mxy, xy, xy1) }
|
|
|
|
// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB132PS xmm xmm xmm
|
|
// VFMADDSUB132PS m128 xmm xmm
|
|
// VFMADDSUB132PS ymm ymm ymm
|
|
// VFMADDSUB132PS m256 ymm ymm
|
|
// Construct and append a VFMADDSUB132PS instruction to the active function.
|
|
func (c *Context) VFMADDSUB132PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADDSUB132PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB132PS xmm xmm xmm
|
|
// VFMADDSUB132PS m128 xmm xmm
|
|
// VFMADDSUB132PS ymm ymm ymm
|
|
// VFMADDSUB132PS m256 ymm ymm
|
|
// Construct and append a VFMADDSUB132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB132PS(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB132PS(mxy, xy, xy1) }
|
|
|
|
// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD xmm xmm xmm
|
|
// VFMADDSUB213PD m128 xmm xmm
|
|
// VFMADDSUB213PD ymm ymm ymm
|
|
// VFMADDSUB213PD m256 ymm ymm
|
|
// Construct and append a VFMADDSUB213PD instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADDSUB213PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD xmm xmm xmm
|
|
// VFMADDSUB213PD m128 xmm xmm
|
|
// VFMADDSUB213PD ymm ymm ymm
|
|
// VFMADDSUB213PD m256 ymm ymm
|
|
// Construct and append a VFMADDSUB213PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB213PD(mxy, xy, xy1) }
|
|
|
|
// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS xmm xmm xmm
|
|
// VFMADDSUB213PS m128 xmm xmm
|
|
// VFMADDSUB213PS ymm ymm ymm
|
|
// VFMADDSUB213PS m256 ymm ymm
|
|
// Construct and append a VFMADDSUB213PS instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADDSUB213PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS xmm xmm xmm
|
|
// VFMADDSUB213PS m128 xmm xmm
|
|
// VFMADDSUB213PS ymm ymm ymm
|
|
// VFMADDSUB213PS m256 ymm ymm
|
|
// Construct and append a VFMADDSUB213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB213PS(mxy, xy, xy1) }
|
|
|
|
// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD xmm xmm xmm
|
|
// VFMADDSUB231PD m128 xmm xmm
|
|
// VFMADDSUB231PD ymm ymm ymm
|
|
// VFMADDSUB231PD m256 ymm ymm
|
|
// Construct and append a VFMADDSUB231PD instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADDSUB231PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD xmm xmm xmm
|
|
// VFMADDSUB231PD m128 xmm xmm
|
|
// VFMADDSUB231PD ymm ymm ymm
|
|
// VFMADDSUB231PD m256 ymm ymm
|
|
// Construct and append a VFMADDSUB231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB231PD(mxy, xy, xy1) }
|
|
|
|
// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS xmm xmm xmm
|
|
// VFMADDSUB231PS m128 xmm xmm
|
|
// VFMADDSUB231PS ymm ymm ymm
|
|
// VFMADDSUB231PS m256 ymm ymm
|
|
// Construct and append a VFMADDSUB231PS instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMADDSUB231PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS xmm xmm xmm
|
|
// VFMADDSUB231PS m128 xmm xmm
|
|
// VFMADDSUB231PS ymm ymm ymm
|
|
// VFMADDSUB231PS m256 ymm ymm
|
|
// Construct and append a VFMADDSUB231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS(mxy, xy, xy1 operand.Op) { ctx.VFMADDSUB231PS(mxy, xy, xy1) }
|
|
|
|
// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD xmm xmm xmm
|
|
// VFMSUB132PD m128 xmm xmm
|
|
// VFMSUB132PD ymm ymm ymm
|
|
// VFMSUB132PD m256 ymm ymm
|
|
// Construct and append a VFMSUB132PD instruction to the active function.
|
|
func (c *Context) VFMSUB132PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUB132PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD xmm xmm xmm
|
|
// VFMSUB132PD m128 xmm xmm
|
|
// VFMSUB132PD ymm ymm ymm
|
|
// VFMSUB132PD m256 ymm ymm
|
|
// Construct and append a VFMSUB132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUB132PD(mxy, xy, xy1) }
|
|
|
|
// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS xmm xmm xmm
|
|
// VFMSUB132PS m128 xmm xmm
|
|
// VFMSUB132PS ymm ymm ymm
|
|
// VFMSUB132PS m256 ymm ymm
|
|
// Construct and append a VFMSUB132PS instruction to the active function.
|
|
func (c *Context) VFMSUB132PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUB132PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS xmm xmm xmm
|
|
// VFMSUB132PS m128 xmm xmm
|
|
// VFMSUB132PS ymm ymm ymm
|
|
// VFMSUB132PS m256 ymm ymm
|
|
// Construct and append a VFMSUB132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUB132PS(mxy, xy, xy1) }
|
|
|
|
// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132SD xmm xmm xmm
|
|
// VFMSUB132SD m64 xmm xmm
|
|
// Construct and append a VFMSUB132SD instruction to the active function.
|
|
func (c *Context) VFMSUB132SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMSUB132SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132SD xmm xmm xmm
|
|
// VFMSUB132SD m64 xmm xmm
|
|
// Construct and append a VFMSUB132SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132SD(mx, x, x1 operand.Op) { ctx.VFMSUB132SD(mx, x, x1) }
|
|
|
|
// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132SS xmm xmm xmm
|
|
// VFMSUB132SS m32 xmm xmm
|
|
// Construct and append a VFMSUB132SS instruction to the active function.
|
|
func (c *Context) VFMSUB132SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMSUB132SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132SS xmm xmm xmm
|
|
// VFMSUB132SS m32 xmm xmm
|
|
// Construct and append a VFMSUB132SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132SS(mx, x, x1 operand.Op) { ctx.VFMSUB132SS(mx, x, x1) }
|
|
|
|
// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD xmm xmm xmm
|
|
// VFMSUB213PD m128 xmm xmm
|
|
// VFMSUB213PD ymm ymm ymm
|
|
// VFMSUB213PD m256 ymm ymm
|
|
// Construct and append a VFMSUB213PD instruction to the active function.
|
|
func (c *Context) VFMSUB213PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUB213PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD xmm xmm xmm
|
|
// VFMSUB213PD m128 xmm xmm
|
|
// VFMSUB213PD ymm ymm ymm
|
|
// VFMSUB213PD m256 ymm ymm
|
|
// Construct and append a VFMSUB213PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUB213PD(mxy, xy, xy1) }
|
|
|
|
// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS xmm xmm xmm
|
|
// VFMSUB213PS m128 xmm xmm
|
|
// VFMSUB213PS ymm ymm ymm
|
|
// VFMSUB213PS m256 ymm ymm
|
|
// Construct and append a VFMSUB213PS instruction to the active function.
|
|
func (c *Context) VFMSUB213PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUB213PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS xmm xmm xmm
|
|
// VFMSUB213PS m128 xmm xmm
|
|
// VFMSUB213PS ymm ymm ymm
|
|
// VFMSUB213PS m256 ymm ymm
|
|
// Construct and append a VFMSUB213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUB213PS(mxy, xy, xy1) }
|
|
|
|
// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD xmm xmm xmm
|
|
// VFMSUB213SD m64 xmm xmm
|
|
// Construct and append a VFMSUB213SD instruction to the active function.
|
|
func (c *Context) VFMSUB213SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMSUB213SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD xmm xmm xmm
|
|
// VFMSUB213SD m64 xmm xmm
|
|
// Construct and append a VFMSUB213SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD(mx, x, x1 operand.Op) { ctx.VFMSUB213SD(mx, x, x1) }
|
|
|
|
// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS xmm xmm xmm
|
|
// VFMSUB213SS m32 xmm xmm
|
|
// Construct and append a VFMSUB213SS instruction to the active function.
|
|
func (c *Context) VFMSUB213SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMSUB213SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS xmm xmm xmm
|
|
// VFMSUB213SS m32 xmm xmm
|
|
// Construct and append a VFMSUB213SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS(mx, x, x1 operand.Op) { ctx.VFMSUB213SS(mx, x, x1) }
|
|
|
|
// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD xmm xmm xmm
|
|
// VFMSUB231PD m128 xmm xmm
|
|
// VFMSUB231PD ymm ymm ymm
|
|
// VFMSUB231PD m256 ymm ymm
|
|
// Construct and append a VFMSUB231PD instruction to the active function.
|
|
func (c *Context) VFMSUB231PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUB231PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD xmm xmm xmm
|
|
// VFMSUB231PD m128 xmm xmm
|
|
// VFMSUB231PD ymm ymm ymm
|
|
// VFMSUB231PD m256 ymm ymm
|
|
// Construct and append a VFMSUB231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUB231PD(mxy, xy, xy1) }
|
|
|
|
// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS xmm xmm xmm
|
|
// VFMSUB231PS m128 xmm xmm
|
|
// VFMSUB231PS ymm ymm ymm
|
|
// VFMSUB231PS m256 ymm ymm
|
|
// Construct and append a VFMSUB231PS instruction to the active function.
|
|
func (c *Context) VFMSUB231PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUB231PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS xmm xmm xmm
|
|
// VFMSUB231PS m128 xmm xmm
|
|
// VFMSUB231PS ymm ymm ymm
|
|
// VFMSUB231PS m256 ymm ymm
|
|
// Construct and append a VFMSUB231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUB231PS(mxy, xy, xy1) }
|
|
|
|
// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD xmm xmm xmm
|
|
// VFMSUB231SD m64 xmm xmm
|
|
// Construct and append a VFMSUB231SD instruction to the active function.
|
|
func (c *Context) VFMSUB231SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMSUB231SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD xmm xmm xmm
|
|
// VFMSUB231SD m64 xmm xmm
|
|
// Construct and append a VFMSUB231SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD(mx, x, x1 operand.Op) { ctx.VFMSUB231SD(mx, x, x1) }
|
|
|
|
// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SS xmm xmm xmm
|
|
// VFMSUB231SS m32 xmm xmm
|
|
// Construct and append a VFMSUB231SS instruction to the active function.
|
|
func (c *Context) VFMSUB231SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFMSUB231SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SS xmm xmm xmm
|
|
// VFMSUB231SS m32 xmm xmm
|
|
// Construct and append a VFMSUB231SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SS(mx, x, x1 operand.Op) { ctx.VFMSUB231SS(mx, x, x1) }
|
|
|
|
// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD132PD xmm xmm xmm
|
|
// VFMSUBADD132PD m128 xmm xmm
|
|
// VFMSUBADD132PD ymm ymm ymm
|
|
// VFMSUBADD132PD m256 ymm ymm
|
|
// Construct and append a VFMSUBADD132PD instruction to the active function.
|
|
func (c *Context) VFMSUBADD132PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUBADD132PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD132PD xmm xmm xmm
|
|
// VFMSUBADD132PD m128 xmm xmm
|
|
// VFMSUBADD132PD ymm ymm ymm
|
|
// VFMSUBADD132PD m256 ymm ymm
|
|
// Construct and append a VFMSUBADD132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD132PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD132PD(mxy, xy, xy1) }
|
|
|
|
// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD132PS xmm xmm xmm
|
|
// VFMSUBADD132PS m128 xmm xmm
|
|
// VFMSUBADD132PS ymm ymm ymm
|
|
// VFMSUBADD132PS m256 ymm ymm
|
|
// Construct and append a VFMSUBADD132PS instruction to the active function.
|
|
func (c *Context) VFMSUBADD132PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUBADD132PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD132PS xmm xmm xmm
|
|
// VFMSUBADD132PS m128 xmm xmm
|
|
// VFMSUBADD132PS ymm ymm ymm
|
|
// VFMSUBADD132PS m256 ymm ymm
|
|
// Construct and append a VFMSUBADD132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD132PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD132PS(mxy, xy, xy1) }
|
|
|
|
// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD213PD xmm xmm xmm
|
|
// VFMSUBADD213PD m128 xmm xmm
|
|
// VFMSUBADD213PD ymm ymm ymm
|
|
// VFMSUBADD213PD m256 ymm ymm
|
|
// Construct and append a VFMSUBADD213PD instruction to the active function.
|
|
func (c *Context) VFMSUBADD213PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUBADD213PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD213PD xmm xmm xmm
|
|
// VFMSUBADD213PD m128 xmm xmm
|
|
// VFMSUBADD213PD ymm ymm ymm
|
|
// VFMSUBADD213PD m256 ymm ymm
|
|
// Construct and append a VFMSUBADD213PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD213PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD213PD(mxy, xy, xy1) }
|
|
|
|
// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD213PS xmm xmm xmm
|
|
// VFMSUBADD213PS m128 xmm xmm
|
|
// VFMSUBADD213PS ymm ymm ymm
|
|
// VFMSUBADD213PS m256 ymm ymm
|
|
// Construct and append a VFMSUBADD213PS instruction to the active function.
|
|
func (c *Context) VFMSUBADD213PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUBADD213PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD213PS xmm xmm xmm
|
|
// VFMSUBADD213PS m128 xmm xmm
|
|
// VFMSUBADD213PS ymm ymm ymm
|
|
// VFMSUBADD213PS m256 ymm ymm
|
|
// Construct and append a VFMSUBADD213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD213PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD213PS(mxy, xy, xy1) }
|
|
|
|
// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD xmm xmm xmm
|
|
// VFMSUBADD231PD m128 xmm xmm
|
|
// VFMSUBADD231PD ymm ymm ymm
|
|
// VFMSUBADD231PD m256 ymm ymm
|
|
// Construct and append a VFMSUBADD231PD instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUBADD231PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD xmm xmm xmm
|
|
// VFMSUBADD231PD m128 xmm xmm
|
|
// VFMSUBADD231PD ymm ymm ymm
|
|
// VFMSUBADD231PD m256 ymm ymm
|
|
// Construct and append a VFMSUBADD231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD231PD(mxy, xy, xy1) }
|
|
|
|
// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS xmm xmm xmm
|
|
// VFMSUBADD231PS m128 xmm xmm
|
|
// VFMSUBADD231PS ymm ymm ymm
|
|
// VFMSUBADD231PS m256 ymm ymm
|
|
// Construct and append a VFMSUBADD231PS instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFMSUBADD231PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS xmm xmm xmm
|
|
// VFMSUBADD231PS m128 xmm xmm
|
|
// VFMSUBADD231PS ymm ymm ymm
|
|
// VFMSUBADD231PS m256 ymm ymm
|
|
// Construct and append a VFMSUBADD231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS(mxy, xy, xy1 operand.Op) { ctx.VFMSUBADD231PS(mxy, xy, xy1) }
|
|
|
|
// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD xmm xmm xmm
|
|
// VFNMADD132PD m128 xmm xmm
|
|
// VFNMADD132PD ymm ymm ymm
|
|
// VFNMADD132PD m256 ymm ymm
|
|
// Construct and append a VFNMADD132PD instruction to the active function.
|
|
func (c *Context) VFNMADD132PD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFNMADD132PD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD xmm xmm xmm
|
|
// VFNMADD132PD m128 xmm xmm
|
|
// VFNMADD132PD ymm ymm ymm
|
|
// VFNMADD132PD m256 ymm ymm
|
|
// Construct and append a VFNMADD132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PD(mxy, xy, xy1 operand.Op) { ctx.VFNMADD132PD(mxy, xy, xy1) }
|
|
|
|
// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PS xmm xmm xmm
|
|
// VFNMADD132PS m128 xmm xmm
|
|
// VFNMADD132PS ymm ymm ymm
|
|
// VFNMADD132PS m256 ymm ymm
|
|
// Construct and append a VFNMADD132PS instruction to the active function.
|
|
func (c *Context) VFNMADD132PS(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VFNMADD132PS(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PS xmm xmm xmm
|
|
// VFNMADD132PS m128 xmm xmm
|
|
// VFNMADD132PS ymm ymm ymm
|
|
// VFNMADD132PS m256 ymm ymm
|
|
// Construct and append a VFNMADD132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PS(mxy, xy, xy1 operand.Op) { ctx.VFNMADD132PS(mxy, xy, xy1) }
|
|
|
|
// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SD xmm xmm xmm
|
|
// VFNMADD132SD m64 xmm xmm
|
|
// Construct and append a VFNMADD132SD instruction to the active function.
|
|
func (c *Context) VFNMADD132SD(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFNMADD132SD(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SD xmm xmm xmm
|
|
// VFNMADD132SD m64 xmm xmm
|
|
// Construct and append a VFNMADD132SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132SD(mx, x, x1 operand.Op) { ctx.VFNMADD132SD(mx, x, x1) }
|
|
|
|
// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS xmm xmm xmm
|
|
// VFNMADD132SS m32 xmm xmm
|
|
// Construct and append a VFNMADD132SS instruction to the active function.
|
|
func (c *Context) VFNMADD132SS(mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VFNMADD132SS(mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD132SS xmm xmm xmm
// VFNMADD132SS m32 xmm xmm
// Construct and append a VFNMADD132SS instruction to the active function.
// Operates on the global context.
func VFNMADD132SS(mx, x, x1 operand.Op) { ctx.VFNMADD132SS(mx, x, x1) }

// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213PD xmm xmm xmm
// VFNMADD213PD m128 xmm xmm
// VFNMADD213PD ymm ymm ymm
// VFNMADD213PD m256 ymm ymm
// Construct and append a VFNMADD213PD instruction to the active function.
func (c *Context) VFNMADD213PD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMADD213PD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213PD xmm xmm xmm
// VFNMADD213PD m128 xmm xmm
// VFNMADD213PD ymm ymm ymm
// VFNMADD213PD m256 ymm ymm
// Construct and append a VFNMADD213PD instruction to the active function.
// Operates on the global context.
func VFNMADD213PD(mxy, xy, xy1 operand.Op) { ctx.VFNMADD213PD(mxy, xy, xy1) }

// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213PS xmm xmm xmm
// VFNMADD213PS m128 xmm xmm
// VFNMADD213PS ymm ymm ymm
// VFNMADD213PS m256 ymm ymm
// Construct and append a VFNMADD213PS instruction to the active function.
func (c *Context) VFNMADD213PS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMADD213PS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213PS xmm xmm xmm
// VFNMADD213PS m128 xmm xmm
// VFNMADD213PS ymm ymm ymm
// VFNMADD213PS m256 ymm ymm
// Construct and append a VFNMADD213PS instruction to the active function.
// Operates on the global context.
func VFNMADD213PS(mxy, xy, xy1 operand.Op) { ctx.VFNMADD213PS(mxy, xy, xy1) }

// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213SD xmm xmm xmm
// VFNMADD213SD m64 xmm xmm
// Construct and append a VFNMADD213SD instruction to the active function.
func (c *Context) VFNMADD213SD(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMADD213SD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213SD xmm xmm xmm
// VFNMADD213SD m64 xmm xmm
// Construct and append a VFNMADD213SD instruction to the active function.
// Operates on the global context.
func VFNMADD213SD(mx, x, x1 operand.Op) { ctx.VFNMADD213SD(mx, x, x1) }

// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213SS xmm xmm xmm
// VFNMADD213SS m32 xmm xmm
// Construct and append a VFNMADD213SS instruction to the active function.
func (c *Context) VFNMADD213SS(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMADD213SS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD213SS xmm xmm xmm
// VFNMADD213SS m32 xmm xmm
// Construct and append a VFNMADD213SS instruction to the active function.
// Operates on the global context.
func VFNMADD213SS(mx, x, x1 operand.Op) { ctx.VFNMADD213SS(mx, x, x1) }

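// Usage note (not part of the generated API surface): every builder in this
// file comes in the same two flavors shown above -- a Context method that
// validates operands through the x86 package and appends the instruction to
// the active function, and a package-level function that forwards to the
// global context. A minimal, illustrative sketch of driving these builders
// from an avo code-generation program follows; the function name "MulAddSub"
// and its parameters are hypothetical examples, not part of this package:
//
//	//go:build ignore
//
//	package main
//
//	import . "github.com/mmcloughlin/avo/build"
//
//	func main() {
//		TEXT("MulAddSub", NOSPLIT, "func(x, y, z float64) float64")
//		a := Load(Param("x"), XMM())
//		b := Load(Param("y"), XMM())
//		c := Load(Param("z"), XMM())
//		// c = -(a*b) + c, using the scalar double-precision register form.
//		VFNMADD231SD(a, b, c)
//		Store(c, ReturnIndex(0))
//		RET()
//		Generate()
//	}
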
// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231PD xmm xmm xmm
// VFNMADD231PD m128 xmm xmm
// VFNMADD231PD ymm ymm ymm
// VFNMADD231PD m256 ymm ymm
// Construct and append a VFNMADD231PD instruction to the active function.
func (c *Context) VFNMADD231PD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMADD231PD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231PD xmm xmm xmm
// VFNMADD231PD m128 xmm xmm
// VFNMADD231PD ymm ymm ymm
// VFNMADD231PD m256 ymm ymm
// Construct and append a VFNMADD231PD instruction to the active function.
// Operates on the global context.
func VFNMADD231PD(mxy, xy, xy1 operand.Op) { ctx.VFNMADD231PD(mxy, xy, xy1) }

// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231PS xmm xmm xmm
// VFNMADD231PS m128 xmm xmm
// VFNMADD231PS ymm ymm ymm
// VFNMADD231PS m256 ymm ymm
// Construct and append a VFNMADD231PS instruction to the active function.
func (c *Context) VFNMADD231PS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMADD231PS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231PS xmm xmm xmm
// VFNMADD231PS m128 xmm xmm
// VFNMADD231PS ymm ymm ymm
// VFNMADD231PS m256 ymm ymm
// Construct and append a VFNMADD231PS instruction to the active function.
// Operates on the global context.
func VFNMADD231PS(mxy, xy, xy1 operand.Op) { ctx.VFNMADD231PS(mxy, xy, xy1) }

// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231SD xmm xmm xmm
// VFNMADD231SD m64 xmm xmm
// Construct and append a VFNMADD231SD instruction to the active function.
func (c *Context) VFNMADD231SD(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMADD231SD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231SD xmm xmm xmm
// VFNMADD231SD m64 xmm xmm
// Construct and append a VFNMADD231SD instruction to the active function.
// Operates on the global context.
func VFNMADD231SD(mx, x, x1 operand.Op) { ctx.VFNMADD231SD(mx, x, x1) }

// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231SS xmm xmm xmm
// VFNMADD231SS m32 xmm xmm
// Construct and append a VFNMADD231SS instruction to the active function.
func (c *Context) VFNMADD231SS(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMADD231SS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMADD231SS xmm xmm xmm
// VFNMADD231SS m32 xmm xmm
// Construct and append a VFNMADD231SS instruction to the active function.
// Operates on the global context.
func VFNMADD231SS(mx, x, x1 operand.Op) { ctx.VFNMADD231SS(mx, x, x1) }

// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132PD xmm xmm xmm
// VFNMSUB132PD m128 xmm xmm
// VFNMSUB132PD ymm ymm ymm
// VFNMSUB132PD m256 ymm ymm
// Construct and append a VFNMSUB132PD instruction to the active function.
func (c *Context) VFNMSUB132PD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMSUB132PD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132PD xmm xmm xmm
// VFNMSUB132PD m128 xmm xmm
// VFNMSUB132PD ymm ymm ymm
// VFNMSUB132PD m256 ymm ymm
// Construct and append a VFNMSUB132PD instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB132PD(mxy, xy, xy1) }

// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132PS xmm xmm xmm
// VFNMSUB132PS m128 xmm xmm
// VFNMSUB132PS ymm ymm ymm
// VFNMSUB132PS m256 ymm ymm
// Construct and append a VFNMSUB132PS instruction to the active function.
func (c *Context) VFNMSUB132PS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMSUB132PS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132PS xmm xmm xmm
// VFNMSUB132PS m128 xmm xmm
// VFNMSUB132PS ymm ymm ymm
// VFNMSUB132PS m256 ymm ymm
// Construct and append a VFNMSUB132PS instruction to the active function.
// Operates on the global context.
func VFNMSUB132PS(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB132PS(mxy, xy, xy1) }

// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132SD xmm xmm xmm
// VFNMSUB132SD m64 xmm xmm
// Construct and append a VFNMSUB132SD instruction to the active function.
func (c *Context) VFNMSUB132SD(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMSUB132SD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132SD xmm xmm xmm
// VFNMSUB132SD m64 xmm xmm
// Construct and append a VFNMSUB132SD instruction to the active function.
// Operates on the global context.
func VFNMSUB132SD(mx, x, x1 operand.Op) { ctx.VFNMSUB132SD(mx, x, x1) }

// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132SS xmm xmm xmm
// VFNMSUB132SS m32 xmm xmm
// Construct and append a VFNMSUB132SS instruction to the active function.
func (c *Context) VFNMSUB132SS(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMSUB132SS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB132SS xmm xmm xmm
// VFNMSUB132SS m32 xmm xmm
// Construct and append a VFNMSUB132SS instruction to the active function.
// Operates on the global context.
func VFNMSUB132SS(mx, x, x1 operand.Op) { ctx.VFNMSUB132SS(mx, x, x1) }

// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213PD xmm xmm xmm
// VFNMSUB213PD m128 xmm xmm
// VFNMSUB213PD ymm ymm ymm
// VFNMSUB213PD m256 ymm ymm
// Construct and append a VFNMSUB213PD instruction to the active function.
func (c *Context) VFNMSUB213PD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMSUB213PD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213PD xmm xmm xmm
// VFNMSUB213PD m128 xmm xmm
// VFNMSUB213PD ymm ymm ymm
// VFNMSUB213PD m256 ymm ymm
// Construct and append a VFNMSUB213PD instruction to the active function.
// Operates on the global context.
func VFNMSUB213PD(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB213PD(mxy, xy, xy1) }

// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213PS xmm xmm xmm
// VFNMSUB213PS m128 xmm xmm
// VFNMSUB213PS ymm ymm ymm
// VFNMSUB213PS m256 ymm ymm
// Construct and append a VFNMSUB213PS instruction to the active function.
func (c *Context) VFNMSUB213PS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMSUB213PS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213PS xmm xmm xmm
// VFNMSUB213PS m128 xmm xmm
// VFNMSUB213PS ymm ymm ymm
// VFNMSUB213PS m256 ymm ymm
// Construct and append a VFNMSUB213PS instruction to the active function.
// Operates on the global context.
func VFNMSUB213PS(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB213PS(mxy, xy, xy1) }

// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213SD xmm xmm xmm
// VFNMSUB213SD m64 xmm xmm
// Construct and append a VFNMSUB213SD instruction to the active function.
func (c *Context) VFNMSUB213SD(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMSUB213SD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213SD xmm xmm xmm
// VFNMSUB213SD m64 xmm xmm
// Construct and append a VFNMSUB213SD instruction to the active function.
// Operates on the global context.
func VFNMSUB213SD(mx, x, x1 operand.Op) { ctx.VFNMSUB213SD(mx, x, x1) }

// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213SS xmm xmm xmm
// VFNMSUB213SS m32 xmm xmm
// Construct and append a VFNMSUB213SS instruction to the active function.
func (c *Context) VFNMSUB213SS(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMSUB213SS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB213SS xmm xmm xmm
// VFNMSUB213SS m32 xmm xmm
// Construct and append a VFNMSUB213SS instruction to the active function.
// Operates on the global context.
func VFNMSUB213SS(mx, x, x1 operand.Op) { ctx.VFNMSUB213SS(mx, x, x1) }

// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231PD xmm xmm xmm
// VFNMSUB231PD m128 xmm xmm
// VFNMSUB231PD ymm ymm ymm
// VFNMSUB231PD m256 ymm ymm
// Construct and append a VFNMSUB231PD instruction to the active function.
func (c *Context) VFNMSUB231PD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMSUB231PD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231PD xmm xmm xmm
// VFNMSUB231PD m128 xmm xmm
// VFNMSUB231PD ymm ymm ymm
// VFNMSUB231PD m256 ymm ymm
// Construct and append a VFNMSUB231PD instruction to the active function.
// Operates on the global context.
func VFNMSUB231PD(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB231PD(mxy, xy, xy1) }

// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231PS xmm xmm xmm
// VFNMSUB231PS m128 xmm xmm
// VFNMSUB231PS ymm ymm ymm
// VFNMSUB231PS m256 ymm ymm
// Construct and append a VFNMSUB231PS instruction to the active function.
func (c *Context) VFNMSUB231PS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VFNMSUB231PS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231PS xmm xmm xmm
// VFNMSUB231PS m128 xmm xmm
// VFNMSUB231PS ymm ymm ymm
// VFNMSUB231PS m256 ymm ymm
// Construct and append a VFNMSUB231PS instruction to the active function.
// Operates on the global context.
func VFNMSUB231PS(mxy, xy, xy1 operand.Op) { ctx.VFNMSUB231PS(mxy, xy, xy1) }

// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231SD xmm xmm xmm
// VFNMSUB231SD m64 xmm xmm
// Construct and append a VFNMSUB231SD instruction to the active function.
func (c *Context) VFNMSUB231SD(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMSUB231SD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231SD xmm xmm xmm
// VFNMSUB231SD m64 xmm xmm
// Construct and append a VFNMSUB231SD instruction to the active function.
// Operates on the global context.
func VFNMSUB231SD(mx, x, x1 operand.Op) { ctx.VFNMSUB231SD(mx, x, x1) }

// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231SS xmm xmm xmm
// VFNMSUB231SS m32 xmm xmm
// Construct and append a VFNMSUB231SS instruction to the active function.
func (c *Context) VFNMSUB231SS(mx, x, x1 operand.Op) {
	if inst, err := x86.VFNMSUB231SS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFNMSUB231SS xmm xmm xmm
// VFNMSUB231SS m32 xmm xmm
// Construct and append a VFNMSUB231SS instruction to the active function.
// Operates on the global context.
func VFNMSUB231SS(mx, x, x1 operand.Op) { ctx.VFNMSUB231SS(mx, x, x1) }

// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices.
//
// Forms:
//
// VGATHERDPD xmm vm32x xmm
// VGATHERDPD ymm vm32x ymm
// Construct and append a VGATHERDPD instruction to the active function.
func (c *Context) VGATHERDPD(xy, v, xy1 operand.Op) {
	if inst, err := x86.VGATHERDPD(xy, v, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices.
//
// Forms:
//
// VGATHERDPD xmm vm32x xmm
// VGATHERDPD ymm vm32x ymm
// Construct and append a VGATHERDPD instruction to the active function.
// Operates on the global context.
func VGATHERDPD(xy, v, xy1 operand.Op) { ctx.VGATHERDPD(xy, v, xy1) }

// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices.
//
// Forms:
//
// VGATHERDPS xmm vm32x xmm
// VGATHERDPS ymm vm32y ymm
// Construct and append a VGATHERDPS instruction to the active function.
func (c *Context) VGATHERDPS(xy, v, xy1 operand.Op) {
	if inst, err := x86.VGATHERDPS(xy, v, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices.
//
// Forms:
//
// VGATHERDPS xmm vm32x xmm
// VGATHERDPS ymm vm32y ymm
// Construct and append a VGATHERDPS instruction to the active function.
// Operates on the global context.
func VGATHERDPS(xy, v, xy1 operand.Op) { ctx.VGATHERDPS(xy, v, xy1) }

// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices.
//
// Forms:
//
// VGATHERQPD xmm vm64x xmm
// VGATHERQPD ymm vm64y ymm
// Construct and append a VGATHERQPD instruction to the active function.
func (c *Context) VGATHERQPD(xy, v, xy1 operand.Op) {
	if inst, err := x86.VGATHERQPD(xy, v, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices.
//
// Forms:
//
// VGATHERQPD xmm vm64x xmm
// VGATHERQPD ymm vm64y ymm
// Construct and append a VGATHERQPD instruction to the active function.
// Operates on the global context.
func VGATHERQPD(xy, v, xy1 operand.Op) { ctx.VGATHERQPD(xy, v, xy1) }

// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices.
//
// Forms:
//
// VGATHERQPS xmm vm64x xmm
// VGATHERQPS xmm vm64y xmm
// Construct and append a VGATHERQPS instruction to the active function.
func (c *Context) VGATHERQPS(x, v, x1 operand.Op) {
	if inst, err := x86.VGATHERQPS(x, v, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices.
//
// Forms:
//
// VGATHERQPS xmm vm64x xmm
// VGATHERQPS xmm vm64y xmm
// Construct and append a VGATHERQPS instruction to the active function.
// Operates on the global context.
func VGATHERQPS(x, v, x1 operand.Op) { ctx.VGATHERQPS(x, v, x1) }

// VHADDPD: Packed Double-FP Horizontal Add.
//
// Forms:
//
// VHADDPD xmm xmm xmm
// VHADDPD m128 xmm xmm
// VHADDPD ymm ymm ymm
// VHADDPD m256 ymm ymm
// Construct and append a VHADDPD instruction to the active function.
func (c *Context) VHADDPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VHADDPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VHADDPD: Packed Double-FP Horizontal Add.
//
// Forms:
//
// VHADDPD xmm xmm xmm
// VHADDPD m128 xmm xmm
// VHADDPD ymm ymm ymm
// VHADDPD m256 ymm ymm
// Construct and append a VHADDPD instruction to the active function.
// Operates on the global context.
func VHADDPD(mxy, xy, xy1 operand.Op) { ctx.VHADDPD(mxy, xy, xy1) }

// VHADDPS: Packed Single-FP Horizontal Add.
//
// Forms:
//
// VHADDPS xmm xmm xmm
// VHADDPS m128 xmm xmm
// VHADDPS ymm ymm ymm
// VHADDPS m256 ymm ymm
// Construct and append a VHADDPS instruction to the active function.
func (c *Context) VHADDPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VHADDPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VHADDPS: Packed Single-FP Horizontal Add.
//
// Forms:
//
// VHADDPS xmm xmm xmm
// VHADDPS m128 xmm xmm
// VHADDPS ymm ymm ymm
// VHADDPS m256 ymm ymm
// Construct and append a VHADDPS instruction to the active function.
// Operates on the global context.
func VHADDPS(mxy, xy, xy1 operand.Op) { ctx.VHADDPS(mxy, xy, xy1) }

// VHSUBPD: Packed Double-FP Horizontal Subtract.
//
// Forms:
//
// VHSUBPD xmm xmm xmm
// VHSUBPD m128 xmm xmm
// VHSUBPD ymm ymm ymm
// VHSUBPD m256 ymm ymm
// Construct and append a VHSUBPD instruction to the active function.
func (c *Context) VHSUBPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VHSUBPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VHSUBPD: Packed Double-FP Horizontal Subtract.
//
// Forms:
//
// VHSUBPD xmm xmm xmm
// VHSUBPD m128 xmm xmm
// VHSUBPD ymm ymm ymm
// VHSUBPD m256 ymm ymm
// Construct and append a VHSUBPD instruction to the active function.
// Operates on the global context.
func VHSUBPD(mxy, xy, xy1 operand.Op) { ctx.VHSUBPD(mxy, xy, xy1) }

// VHSUBPS: Packed Single-FP Horizontal Subtract.
//
// Forms:
//
// VHSUBPS xmm xmm xmm
// VHSUBPS m128 xmm xmm
// VHSUBPS ymm ymm ymm
// VHSUBPS m256 ymm ymm
// Construct and append a VHSUBPS instruction to the active function.
func (c *Context) VHSUBPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VHSUBPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VHSUBPS: Packed Single-FP Horizontal Subtract.
//
// Forms:
//
// VHSUBPS xmm xmm xmm
// VHSUBPS m128 xmm xmm
// VHSUBPS ymm ymm ymm
// VHSUBPS m256 ymm ymm
// Construct and append a VHSUBPS instruction to the active function.
// Operates on the global context.
func VHSUBPS(mxy, xy, xy1 operand.Op) { ctx.VHSUBPS(mxy, xy, xy1) }

// VINSERTF128: Insert Packed Floating-Point Values.
//
// Forms:
//
// VINSERTF128 imm8 xmm ymm ymm
// VINSERTF128 imm8 m128 ymm ymm
// Construct and append a VINSERTF128 instruction to the active function.
func (c *Context) VINSERTF128(i, mx, y, y1 operand.Op) {
	if inst, err := x86.VINSERTF128(i, mx, y, y1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VINSERTF128: Insert Packed Floating-Point Values.
//
// Forms:
//
// VINSERTF128 imm8 xmm ymm ymm
// VINSERTF128 imm8 m128 ymm ymm
// Construct and append a VINSERTF128 instruction to the active function.
// Operates on the global context.
func VINSERTF128(i, mx, y, y1 operand.Op) { ctx.VINSERTF128(i, mx, y, y1) }

// VINSERTI128: Insert Packed Integer Values.
//
// Forms:
//
// VINSERTI128 imm8 xmm ymm ymm
// VINSERTI128 imm8 m128 ymm ymm
// Construct and append a VINSERTI128 instruction to the active function.
func (c *Context) VINSERTI128(i, mx, y, y1 operand.Op) {
	if inst, err := x86.VINSERTI128(i, mx, y, y1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VINSERTI128: Insert Packed Integer Values.
//
// Forms:
//
// VINSERTI128 imm8 xmm ymm ymm
// VINSERTI128 imm8 m128 ymm ymm
// Construct and append a VINSERTI128 instruction to the active function.
// Operates on the global context.
func VINSERTI128(i, mx, y, y1 operand.Op) { ctx.VINSERTI128(i, mx, y, y1) }

// VINSERTPS: Insert Packed Single Precision Floating-Point Value.
//
// Forms:
//
// VINSERTPS imm8 xmm xmm xmm
// VINSERTPS imm8 m32 xmm xmm
// Construct and append a VINSERTPS instruction to the active function.
func (c *Context) VINSERTPS(i, mx, x, x1 operand.Op) {
	if inst, err := x86.VINSERTPS(i, mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VINSERTPS: Insert Packed Single Precision Floating-Point Value.
//
// Forms:
//
// VINSERTPS imm8 xmm xmm xmm
// VINSERTPS imm8 m32 xmm xmm
// Construct and append a VINSERTPS instruction to the active function.
// Operates on the global context.
func VINSERTPS(i, mx, x, x1 operand.Op) { ctx.VINSERTPS(i, mx, x, x1) }

// VLDDQU: Load Unaligned Integer 128 Bits.
//
// Forms:
//
// VLDDQU m128 xmm
// VLDDQU m256 ymm
// Construct and append a VLDDQU instruction to the active function.
func (c *Context) VLDDQU(m, xy operand.Op) {
	if inst, err := x86.VLDDQU(m, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VLDDQU: Load Unaligned Integer 128 Bits.
//
// Forms:
//
// VLDDQU m128 xmm
// VLDDQU m256 ymm
// Construct and append a VLDDQU instruction to the active function.
// Operates on the global context.
func VLDDQU(m, xy operand.Op) { ctx.VLDDQU(m, xy) }

// VLDMXCSR: Load MXCSR Register.
//
// Forms:
//
// VLDMXCSR m32
// Construct and append a VLDMXCSR instruction to the active function.
func (c *Context) VLDMXCSR(m operand.Op) {
	if inst, err := x86.VLDMXCSR(m); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VLDMXCSR: Load MXCSR Register.
//
// Forms:
//
// VLDMXCSR m32
// Construct and append a VLDMXCSR instruction to the active function.
// Operates on the global context.
func VLDMXCSR(m operand.Op) { ctx.VLDMXCSR(m) }

// VMASKMOVDQU: Store Selected Bytes of Double Quadword.
//
// Forms:
//
// VMASKMOVDQU xmm xmm
// Construct and append a VMASKMOVDQU instruction to the active function.
func (c *Context) VMASKMOVDQU(x, x1 operand.Op) {
	if inst, err := x86.VMASKMOVDQU(x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMASKMOVDQU: Store Selected Bytes of Double Quadword.
//
// Forms:
//
// VMASKMOVDQU xmm xmm
// Construct and append a VMASKMOVDQU instruction to the active function.
// Operates on the global context.
func VMASKMOVDQU(x, x1 operand.Op) { ctx.VMASKMOVDQU(x, x1) }

// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMASKMOVPD m128 xmm xmm
// VMASKMOVPD m256 ymm ymm
// VMASKMOVPD xmm xmm m128
// VMASKMOVPD ymm ymm m256
// Construct and append a VMASKMOVPD instruction to the active function.
func (c *Context) VMASKMOVPD(mxy, xy, mxy1 operand.Op) {
	if inst, err := x86.VMASKMOVPD(mxy, xy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMASKMOVPD m128 xmm xmm
// VMASKMOVPD m256 ymm ymm
// VMASKMOVPD xmm xmm m128
// VMASKMOVPD ymm ymm m256
// Construct and append a VMASKMOVPD instruction to the active function.
// Operates on the global context.
func VMASKMOVPD(mxy, xy, mxy1 operand.Op) { ctx.VMASKMOVPD(mxy, xy, mxy1) }

// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMASKMOVPS m128 xmm xmm
// VMASKMOVPS m256 ymm ymm
// VMASKMOVPS xmm xmm m128
// VMASKMOVPS ymm ymm m256
// Construct and append a VMASKMOVPS instruction to the active function.
func (c *Context) VMASKMOVPS(mxy, xy, mxy1 operand.Op) {
	if inst, err := x86.VMASKMOVPS(mxy, xy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMASKMOVPS m128 xmm xmm
// VMASKMOVPS m256 ymm ymm
// VMASKMOVPS xmm xmm m128
// VMASKMOVPS ymm ymm m256
// Construct and append a VMASKMOVPS instruction to the active function.
// Operates on the global context.
func VMASKMOVPS(mxy, xy, mxy1 operand.Op) { ctx.VMASKMOVPS(mxy, xy, mxy1) }

// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMAXPD xmm xmm xmm
// VMAXPD m128 xmm xmm
// VMAXPD ymm ymm ymm
// VMAXPD m256 ymm ymm
// Construct and append a VMAXPD instruction to the active function.
func (c *Context) VMAXPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VMAXPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMAXPD xmm xmm xmm
// VMAXPD m128 xmm xmm
// VMAXPD ymm ymm ymm
// VMAXPD m256 ymm ymm
// Construct and append a VMAXPD instruction to the active function.
// Operates on the global context.
func VMAXPD(mxy, xy, xy1 operand.Op) { ctx.VMAXPD(mxy, xy, xy1) }

// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMAXPS xmm xmm xmm
// VMAXPS m128 xmm xmm
// VMAXPS ymm ymm ymm
// VMAXPS m256 ymm ymm
// Construct and append a VMAXPS instruction to the active function.
func (c *Context) VMAXPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VMAXPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMAXPS xmm xmm xmm
// VMAXPS m128 xmm xmm
// VMAXPS ymm ymm ymm
// VMAXPS m256 ymm ymm
// Construct and append a VMAXPS instruction to the active function.
// Operates on the global context.
func VMAXPS(mxy, xy, xy1 operand.Op) { ctx.VMAXPS(mxy, xy, xy1) }

// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VMAXSD xmm xmm xmm
// VMAXSD m64 xmm xmm
// Construct and append a VMAXSD instruction to the active function.
func (c *Context) VMAXSD(mx, x, x1 operand.Op) {
	if inst, err := x86.VMAXSD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VMAXSD xmm xmm xmm
// VMAXSD m64 xmm xmm
// Construct and append a VMAXSD instruction to the active function.
// Operates on the global context.
func VMAXSD(mx, x, x1 operand.Op) { ctx.VMAXSD(mx, x, x1) }

// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VMAXSS xmm xmm xmm
// VMAXSS m32 xmm xmm
// Construct and append a VMAXSS instruction to the active function.
func (c *Context) VMAXSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VMAXSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VMAXSS xmm xmm xmm
// VMAXSS m32 xmm xmm
// Construct and append a VMAXSS instruction to the active function.
// Operates on the global context.
func VMAXSS(mx, x, x1 operand.Op) { ctx.VMAXSS(mx, x, x1) }

// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMINPD xmm xmm xmm
// VMINPD m128 xmm xmm
// VMINPD ymm ymm ymm
// VMINPD m256 ymm ymm
// Construct and append a VMINPD instruction to the active function.
func (c *Context) VMINPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VMINPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMINPD xmm xmm xmm
// VMINPD m128 xmm xmm
// VMINPD ymm ymm ymm
// VMINPD m256 ymm ymm
// Construct and append a VMINPD instruction to the active function.
// Operates on the global context.
func VMINPD(mxy, xy, xy1 operand.Op) { ctx.VMINPD(mxy, xy, xy1) }

// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMINPS xmm xmm xmm
// VMINPS m128 xmm xmm
// VMINPS ymm ymm ymm
// VMINPS m256 ymm ymm
// Construct and append a VMINPS instruction to the active function.
func (c *Context) VMINPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VMINPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMINPS xmm xmm xmm
// VMINPS m128 xmm xmm
// VMINPS ymm ymm ymm
// VMINPS m256 ymm ymm
// Construct and append a VMINPS instruction to the active function.
// Operates on the global context.
func VMINPS(mxy, xy, xy1 operand.Op) { ctx.VMINPS(mxy, xy, xy1) }

// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VMINSD xmm xmm xmm
// VMINSD m64 xmm xmm
// Construct and append a VMINSD instruction to the active function.
func (c *Context) VMINSD(mx, x, x1 operand.Op) {
	if inst, err := x86.VMINSD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VMINSD xmm xmm xmm
// VMINSD m64 xmm xmm
// Construct and append a VMINSD instruction to the active function.
// Operates on the global context.
func VMINSD(mx, x, x1 operand.Op) { ctx.VMINSD(mx, x, x1) }

// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VMINSS xmm xmm xmm
// VMINSS m32 xmm xmm
// Construct and append a VMINSS instruction to the active function.
func (c *Context) VMINSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VMINSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VMINSS xmm xmm xmm
// VMINSS m32 xmm xmm
// Construct and append a VMINSS instruction to the active function.
// Operates on the global context.
func VMINSS(mx, x, x1 operand.Op) { ctx.VMINSS(mx, x, x1) }

// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMOVAPD xmm xmm
// VMOVAPD m128 xmm
// VMOVAPD ymm ymm
// VMOVAPD m256 ymm
// VMOVAPD xmm m128
// VMOVAPD ymm m256
// Construct and append a VMOVAPD instruction to the active function.
func (c *Context) VMOVAPD(mxy, mxy1 operand.Op) {
	if inst, err := x86.VMOVAPD(mxy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMOVAPD xmm xmm
// VMOVAPD m128 xmm
// VMOVAPD ymm ymm
// VMOVAPD m256 ymm
// VMOVAPD xmm m128
// VMOVAPD ymm m256
// Construct and append a VMOVAPD instruction to the active function.
// Operates on the global context.
func VMOVAPD(mxy, mxy1 operand.Op) { ctx.VMOVAPD(mxy, mxy1) }

// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVAPS xmm xmm
// VMOVAPS m128 xmm
// VMOVAPS ymm ymm
// VMOVAPS m256 ymm
// VMOVAPS xmm m128
// VMOVAPS ymm m256
// Construct and append a VMOVAPS instruction to the active function.
func (c *Context) VMOVAPS(mxy, mxy1 operand.Op) {
	if inst, err := x86.VMOVAPS(mxy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVAPS xmm xmm
// VMOVAPS m128 xmm
// VMOVAPS ymm ymm
// VMOVAPS m256 ymm
// VMOVAPS xmm m128
// VMOVAPS ymm m256
// Construct and append a VMOVAPS instruction to the active function.
// Operates on the global context.
func VMOVAPS(mxy, mxy1 operand.Op) { ctx.VMOVAPS(mxy, mxy1) }

// VMOVD: Move Doubleword.
//
// Forms:
//
// VMOVD xmm r32
// VMOVD r32 xmm
// VMOVD m32 xmm
// VMOVD xmm m32
// Construct and append a VMOVD instruction to the active function.
func (c *Context) VMOVD(mrx, mrx1 operand.Op) {
	if inst, err := x86.VMOVD(mrx, mrx1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVD: Move Doubleword.
//
// Forms:
//
// VMOVD xmm r32
// VMOVD r32 xmm
// VMOVD m32 xmm
// VMOVD xmm m32
// Construct and append a VMOVD instruction to the active function.
// Operates on the global context.
func VMOVD(mrx, mrx1 operand.Op) { ctx.VMOVD(mrx, mrx1) }

// VMOVDDUP: Move One Double-FP and Duplicate.
//
// Forms:
//
// VMOVDDUP xmm xmm
// VMOVDDUP m64 xmm
// VMOVDDUP ymm ymm
// VMOVDDUP m256 ymm
// Construct and append a VMOVDDUP instruction to the active function.
func (c *Context) VMOVDDUP(mxy, xy operand.Op) {
	if inst, err := x86.VMOVDDUP(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVDDUP: Move One Double-FP and Duplicate.
//
// Forms:
//
// VMOVDDUP xmm xmm
// VMOVDDUP m64 xmm
// VMOVDDUP ymm ymm
// VMOVDDUP m256 ymm
// Construct and append a VMOVDDUP instruction to the active function.
// Operates on the global context.
func VMOVDDUP(mxy, xy operand.Op) { ctx.VMOVDDUP(mxy, xy) }

// VMOVDQA: Move Aligned Double Quadword.
//
// Forms:
//
// VMOVDQA xmm xmm
// VMOVDQA m128 xmm
// VMOVDQA ymm ymm
// VMOVDQA m256 ymm
// VMOVDQA xmm m128
// VMOVDQA ymm m256
// Construct and append a VMOVDQA instruction to the active function.
func (c *Context) VMOVDQA(mxy, mxy1 operand.Op) {
	if inst, err := x86.VMOVDQA(mxy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVDQA: Move Aligned Double Quadword.
//
// Forms:
//
// VMOVDQA xmm xmm
// VMOVDQA m128 xmm
// VMOVDQA ymm ymm
// VMOVDQA m256 ymm
// VMOVDQA xmm m128
// VMOVDQA ymm m256
// Construct and append a VMOVDQA instruction to the active function.
// Operates on the global context.
func VMOVDQA(mxy, mxy1 operand.Op) { ctx.VMOVDQA(mxy, mxy1) }

// VMOVDQU: Move Unaligned Double Quadword.
//
// Forms:
//
// VMOVDQU xmm xmm
// VMOVDQU m128 xmm
// VMOVDQU ymm ymm
// VMOVDQU m256 ymm
// VMOVDQU xmm m128
// VMOVDQU ymm m256
// Construct and append a VMOVDQU instruction to the active function.
func (c *Context) VMOVDQU(mxy, mxy1 operand.Op) {
	if inst, err := x86.VMOVDQU(mxy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVDQU: Move Unaligned Double Quadword.
//
// Forms:
//
// VMOVDQU xmm xmm
// VMOVDQU m128 xmm
// VMOVDQU ymm ymm
// VMOVDQU m256 ymm
// VMOVDQU xmm m128
// VMOVDQU ymm m256
// Construct and append a VMOVDQU instruction to the active function.
// Operates on the global context.
func VMOVDQU(mxy, mxy1 operand.Op) { ctx.VMOVDQU(mxy, mxy1) }

// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
//
// Forms:
//
// VMOVHLPS xmm xmm xmm
// Construct and append a VMOVHLPS instruction to the active function.
func (c *Context) VMOVHLPS(x, x1, x2 operand.Op) {
	if inst, err := x86.VMOVHLPS(x, x1, x2); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
//
// Forms:
//
// VMOVHLPS xmm xmm xmm
// Construct and append a VMOVHLPS instruction to the active function.
// Operates on the global context.
func VMOVHLPS(x, x1, x2 operand.Op) { ctx.VMOVHLPS(x, x1, x2) }

// VMOVHPD: Move High Packed Double-Precision Floating-Point Value.
//
// Forms:
//
// VMOVHPD xmm m64
// VMOVHPD m64 xmm xmm
// Construct and append a VMOVHPD instruction to the active function.
func (c *Context) VMOVHPD(ops ...operand.Op) {
	if inst, err := x86.VMOVHPD(ops...); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVHPD: Move High Packed Double-Precision Floating-Point Value.
//
// Forms:
//
// VMOVHPD xmm m64
// VMOVHPD m64 xmm xmm
// Construct and append a VMOVHPD instruction to the active function.
// Operates on the global context.
func VMOVHPD(ops ...operand.Op) { ctx.VMOVHPD(ops...) }

// VMOVHPS: Move High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVHPS xmm m64
// VMOVHPS m64 xmm xmm
// Construct and append a VMOVHPS instruction to the active function.
func (c *Context) VMOVHPS(ops ...operand.Op) {
	if inst, err := x86.VMOVHPS(ops...); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVHPS: Move High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVHPS xmm m64
// VMOVHPS m64 xmm xmm
// Construct and append a VMOVHPS instruction to the active function.
// Operates on the global context.
func VMOVHPS(ops ...operand.Op) { ctx.VMOVHPS(ops...) }

// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
//
// Forms:
//
// VMOVLHPS xmm xmm xmm
// Construct and append a VMOVLHPS instruction to the active function.
func (c *Context) VMOVLHPS(x, x1, x2 operand.Op) {
	if inst, err := x86.VMOVLHPS(x, x1, x2); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
//
// Forms:
//
// VMOVLHPS xmm xmm xmm
// Construct and append a VMOVLHPS instruction to the active function.
// Operates on the global context.
func VMOVLHPS(x, x1, x2 operand.Op) { ctx.VMOVLHPS(x, x1, x2) }

// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value.
//
// Forms:
//
// VMOVLPD xmm m64
// VMOVLPD m64 xmm xmm
// Construct and append a VMOVLPD instruction to the active function.
func (c *Context) VMOVLPD(ops ...operand.Op) {
	if inst, err := x86.VMOVLPD(ops...); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value.
//
// Forms:
//
// VMOVLPD xmm m64
// VMOVLPD m64 xmm xmm
// Construct and append a VMOVLPD instruction to the active function.
// Operates on the global context.
func VMOVLPD(ops ...operand.Op) { ctx.VMOVLPD(ops...) }

// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVLPS xmm m64
// VMOVLPS m64 xmm xmm
// Construct and append a VMOVLPS instruction to the active function.
func (c *Context) VMOVLPS(ops ...operand.Op) {
	if inst, err := x86.VMOVLPS(ops...); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVLPS xmm m64
// VMOVLPS m64 xmm xmm
// Construct and append a VMOVLPS instruction to the active function.
// Operates on the global context.
func VMOVLPS(ops ...operand.Op) { ctx.VMOVLPS(ops...) }

// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
//
// Forms:
//
// VMOVMSKPD xmm r32
// VMOVMSKPD ymm r32
// Construct and append a VMOVMSKPD instruction to the active function.
func (c *Context) VMOVMSKPD(xy, r operand.Op) {
	if inst, err := x86.VMOVMSKPD(xy, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
//
// Forms:
//
// VMOVMSKPD xmm r32
// VMOVMSKPD ymm r32
// Construct and append a VMOVMSKPD instruction to the active function.
// Operates on the global context.
func VMOVMSKPD(xy, r operand.Op) { ctx.VMOVMSKPD(xy, r) }

// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
//
// Forms:
//
// VMOVMSKPS xmm r32
// VMOVMSKPS ymm r32
// Construct and append a VMOVMSKPS instruction to the active function.
func (c *Context) VMOVMSKPS(xy, r operand.Op) {
	if inst, err := x86.VMOVMSKPS(xy, r); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
//
// Forms:
//
// VMOVMSKPS xmm r32
// VMOVMSKPS ymm r32
// Construct and append a VMOVMSKPS instruction to the active function.
// Operates on the global context.
func VMOVMSKPS(xy, r operand.Op) { ctx.VMOVMSKPS(xy, r) }

// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
//
// Forms:
//
// VMOVNTDQ xmm m128
// VMOVNTDQ ymm m256
// Construct and append a VMOVNTDQ instruction to the active function.
func (c *Context) VMOVNTDQ(xy, m operand.Op) {
	if inst, err := x86.VMOVNTDQ(xy, m); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
//
// Forms:
//
// VMOVNTDQ xmm m128
// VMOVNTDQ ymm m256
// Construct and append a VMOVNTDQ instruction to the active function.
// Operates on the global context.
func VMOVNTDQ(xy, m operand.Op) { ctx.VMOVNTDQ(xy, m) }

// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
//
// Forms:
//
// VMOVNTDQA m128 xmm
// VMOVNTDQA m256 ymm
// Construct and append a VMOVNTDQA instruction to the active function.
func (c *Context) VMOVNTDQA(m, xy operand.Op) {
	if inst, err := x86.VMOVNTDQA(m, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
//
// Forms:
//
// VMOVNTDQA m128 xmm
// VMOVNTDQA m256 ymm
// Construct and append a VMOVNTDQA instruction to the active function.
// Operates on the global context.
func VMOVNTDQA(m, xy operand.Op) { ctx.VMOVNTDQA(m, xy) }

// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
// VMOVNTPD xmm m128
// VMOVNTPD ymm m256
// Construct and append a VMOVNTPD instruction to the active function.
func (c *Context) VMOVNTPD(xy, m operand.Op) {
	if inst, err := x86.VMOVNTPD(xy, m); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
// VMOVNTPD xmm m128
// VMOVNTPD ymm m256
// Construct and append a VMOVNTPD instruction to the active function.
// Operates on the global context.
func VMOVNTPD(xy, m operand.Op) { ctx.VMOVNTPD(xy, m) }

// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
// VMOVNTPS xmm m128
// VMOVNTPS ymm m256
// Construct and append a VMOVNTPS instruction to the active function.
func (c *Context) VMOVNTPS(xy, m operand.Op) {
	if inst, err := x86.VMOVNTPS(xy, m); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
// VMOVNTPS xmm m128
// VMOVNTPS ymm m256
// Construct and append a VMOVNTPS instruction to the active function.
// Operates on the global context.
func VMOVNTPS(xy, m operand.Op) { ctx.VMOVNTPS(xy, m) }

// VMOVQ: Move Quadword.
//
// Forms:
//
// VMOVQ xmm r64
// VMOVQ r64 xmm
// VMOVQ xmm xmm
// VMOVQ m64 xmm
// VMOVQ xmm m64
// Construct and append a VMOVQ instruction to the active function.
func (c *Context) VMOVQ(mrx, mrx1 operand.Op) {
	if inst, err := x86.VMOVQ(mrx, mrx1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVQ: Move Quadword.
//
// Forms:
//
// VMOVQ xmm r64
// VMOVQ r64 xmm
// VMOVQ xmm xmm
// VMOVQ m64 xmm
// VMOVQ xmm m64
// Construct and append a VMOVQ instruction to the active function.
// Operates on the global context.
func VMOVQ(mrx, mrx1 operand.Op) { ctx.VMOVQ(mrx, mrx1) }

// VMOVSD: Move Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VMOVSD m64 xmm
// VMOVSD xmm m64
// VMOVSD xmm xmm xmm
// Construct and append a VMOVSD instruction to the active function.
func (c *Context) VMOVSD(ops ...operand.Op) {
	if inst, err := x86.VMOVSD(ops...); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVSD: Move Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VMOVSD m64 xmm
// VMOVSD xmm m64
// VMOVSD xmm xmm xmm
// Construct and append a VMOVSD instruction to the active function.
// Operates on the global context.
func VMOVSD(ops ...operand.Op) { ctx.VMOVSD(ops...) }

// VMOVSHDUP: Move Packed Single-FP High and Duplicate.
//
// Forms:
//
// VMOVSHDUP xmm xmm
// VMOVSHDUP m128 xmm
// VMOVSHDUP ymm ymm
// VMOVSHDUP m256 ymm
// Construct and append a VMOVSHDUP instruction to the active function.
func (c *Context) VMOVSHDUP(mxy, xy operand.Op) {
	if inst, err := x86.VMOVSHDUP(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVSHDUP: Move Packed Single-FP High and Duplicate.
//
// Forms:
//
// VMOVSHDUP xmm xmm
// VMOVSHDUP m128 xmm
// VMOVSHDUP ymm ymm
// VMOVSHDUP m256 ymm
// Construct and append a VMOVSHDUP instruction to the active function.
// Operates on the global context.
func VMOVSHDUP(mxy, xy operand.Op) { ctx.VMOVSHDUP(mxy, xy) }

// VMOVSLDUP: Move Packed Single-FP Low and Duplicate.
//
// Forms:
//
// VMOVSLDUP xmm xmm
// VMOVSLDUP m128 xmm
// VMOVSLDUP ymm ymm
// VMOVSLDUP m256 ymm
// Construct and append a VMOVSLDUP instruction to the active function.
func (c *Context) VMOVSLDUP(mxy, xy operand.Op) {
	if inst, err := x86.VMOVSLDUP(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVSLDUP: Move Packed Single-FP Low and Duplicate.
//
// Forms:
//
// VMOVSLDUP xmm xmm
// VMOVSLDUP m128 xmm
// VMOVSLDUP ymm ymm
// VMOVSLDUP m256 ymm
// Construct and append a VMOVSLDUP instruction to the active function.
// Operates on the global context.
func VMOVSLDUP(mxy, xy operand.Op) { ctx.VMOVSLDUP(mxy, xy) }

// VMOVSS: Move Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVSS m32 xmm
// VMOVSS xmm m32
// VMOVSS xmm xmm xmm
// Construct and append a VMOVSS instruction to the active function.
func (c *Context) VMOVSS(ops ...operand.Op) {
	if inst, err := x86.VMOVSS(ops...); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVSS: Move Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVSS m32 xmm
// VMOVSS xmm m32
// VMOVSS xmm xmm xmm
// Construct and append a VMOVSS instruction to the active function.
// Operates on the global context.
func VMOVSS(ops ...operand.Op) { ctx.VMOVSS(ops...) }

// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMOVUPD xmm xmm
// VMOVUPD m128 xmm
// VMOVUPD ymm ymm
// VMOVUPD m256 ymm
// VMOVUPD xmm m128
// VMOVUPD ymm m256
// Construct and append a VMOVUPD instruction to the active function.
func (c *Context) VMOVUPD(mxy, mxy1 operand.Op) {
	if inst, err := x86.VMOVUPD(mxy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VMOVUPD xmm xmm
// VMOVUPD m128 xmm
// VMOVUPD ymm ymm
// VMOVUPD m256 ymm
// VMOVUPD xmm m128
// VMOVUPD ymm m256
// Construct and append a VMOVUPD instruction to the active function.
// Operates on the global context.
func VMOVUPD(mxy, mxy1 operand.Op) { ctx.VMOVUPD(mxy, mxy1) }

// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVUPS xmm xmm
// VMOVUPS m128 xmm
// VMOVUPS ymm ymm
// VMOVUPS m256 ymm
// VMOVUPS xmm m128
// VMOVUPS ymm m256
// Construct and append a VMOVUPS instruction to the active function.
func (c *Context) VMOVUPS(mxy, mxy1 operand.Op) {
	if inst, err := x86.VMOVUPS(mxy, mxy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMOVUPS xmm xmm
// VMOVUPS m128 xmm
// VMOVUPS ymm ymm
// VMOVUPS m256 ymm
// VMOVUPS xmm m128
// VMOVUPS ymm m256
// Construct and append a VMOVUPS instruction to the active function.
// Operates on the global context.
func VMOVUPS(mxy, mxy1 operand.Op) { ctx.VMOVUPS(mxy, mxy1) }

// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMPSADBW imm8 xmm xmm xmm
|
|
// VMPSADBW imm8 m128 xmm xmm
|
|
// VMPSADBW imm8 ymm ymm ymm
|
|
// VMPSADBW imm8 m256 ymm ymm
|
|
// Construct and append a VMPSADBW instruction to the active function.
|
|
func (c *Context) VMPSADBW(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VMPSADBW(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMPSADBW imm8 xmm xmm xmm
|
|
// VMPSADBW imm8 m128 xmm xmm
|
|
// VMPSADBW imm8 ymm ymm ymm
|
|
// VMPSADBW imm8 m256 ymm ymm
|
|
// Construct and append a VMPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMPSADBW(i, mxy, xy, xy1 operand.Op) { ctx.VMPSADBW(i, mxy, xy, xy1) }
|
|
|
|
// VMULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD xmm xmm xmm
|
|
// VMULPD m128 xmm xmm
|
|
// VMULPD ymm ymm ymm
|
|
// VMULPD m256 ymm ymm
|
|
// Construct and append a VMULPD instruction to the active function.
|
|
func (c *Context) VMULPD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VMULPD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VMULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD xmm xmm xmm
|
|
// VMULPD m128 xmm xmm
|
|
// VMULPD ymm ymm ymm
|
|
// VMULPD m256 ymm ymm
|
|
// Construct and append a VMULPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD(mxy, xy, xy1 operand.Op) { ctx.VMULPD(mxy, xy, xy1) }
|
|
|
|
// VMULPS: Multiply Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMULPS xmm xmm xmm
// VMULPS m128 xmm xmm
// VMULPS ymm ymm ymm
// VMULPS m256 ymm ymm
// Construct and append a VMULPS instruction to the active function.
func (c *Context) VMULPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VMULPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMULPS: Multiply Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VMULPS xmm xmm xmm
// VMULPS m128 xmm xmm
// VMULPS ymm ymm ymm
// VMULPS m256 ymm ymm
// Construct and append a VMULPS instruction to the active function.
// Operates on the global context.
func VMULPS(mxy, xy, xy1 operand.Op) { ctx.VMULPS(mxy, xy, xy1) }

// VMULSD: Multiply Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VMULSD xmm xmm xmm
// VMULSD m64 xmm xmm
// Construct and append a VMULSD instruction to the active function.
func (c *Context) VMULSD(mx, x, x1 operand.Op) {
	if inst, err := x86.VMULSD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMULSD: Multiply Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VMULSD xmm xmm xmm
// VMULSD m64 xmm xmm
// Construct and append a VMULSD instruction to the active function.
// Operates on the global context.
func VMULSD(mx, x, x1 operand.Op) { ctx.VMULSD(mx, x, x1) }

// VMULSS: Multiply Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VMULSS xmm xmm xmm
// VMULSS m32 xmm xmm
// Construct and append a VMULSS instruction to the active function.
func (c *Context) VMULSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VMULSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VMULSS: Multiply Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VMULSS xmm xmm xmm
// VMULSS m32 xmm xmm
// Construct and append a VMULSS instruction to the active function.
// Operates on the global context.
func VMULSS(mx, x, x1 operand.Op) { ctx.VMULSS(mx, x, x1) }

// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
//
// Forms:
//
// VORPD xmm xmm xmm
// VORPD m128 xmm xmm
// VORPD ymm ymm ymm
// VORPD m256 ymm ymm
// Construct and append a VORPD instruction to the active function.
func (c *Context) VORPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VORPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
//
// Forms:
//
// VORPD xmm xmm xmm
// VORPD m128 xmm xmm
// VORPD ymm ymm ymm
// VORPD m256 ymm ymm
// Construct and append a VORPD instruction to the active function.
// Operates on the global context.
func VORPD(mxy, xy, xy1 operand.Op) { ctx.VORPD(mxy, xy, xy1) }

// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
//
// Forms:
//
// VORPS xmm xmm xmm
// VORPS m128 xmm xmm
// VORPS ymm ymm ymm
// VORPS m256 ymm ymm
// Construct and append a VORPS instruction to the active function.
func (c *Context) VORPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VORPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
//
// Forms:
//
// VORPS xmm xmm xmm
// VORPS m128 xmm xmm
// VORPS ymm ymm ymm
// VORPS m256 ymm ymm
// Construct and append a VORPS instruction to the active function.
// Operates on the global context.
func VORPS(mxy, xy, xy1 operand.Op) { ctx.VORPS(mxy, xy, xy1) }

// VPABSB: Packed Absolute Value of Byte Integers.
//
// Forms:
//
// VPABSB xmm xmm
// VPABSB m128 xmm
// VPABSB ymm ymm
// VPABSB m256 ymm
// Construct and append a VPABSB instruction to the active function.
func (c *Context) VPABSB(mxy, xy operand.Op) {
	if inst, err := x86.VPABSB(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPABSB: Packed Absolute Value of Byte Integers.
//
// Forms:
//
// VPABSB xmm xmm
// VPABSB m128 xmm
// VPABSB ymm ymm
// VPABSB m256 ymm
// Construct and append a VPABSB instruction to the active function.
// Operates on the global context.
func VPABSB(mxy, xy operand.Op) { ctx.VPABSB(mxy, xy) }

// VPABSD: Packed Absolute Value of Doubleword Integers.
//
// Forms:
//
// VPABSD xmm xmm
// VPABSD m128 xmm
// VPABSD ymm ymm
// VPABSD m256 ymm
// Construct and append a VPABSD instruction to the active function.
func (c *Context) VPABSD(mxy, xy operand.Op) {
	if inst, err := x86.VPABSD(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPABSD: Packed Absolute Value of Doubleword Integers.
//
// Forms:
//
// VPABSD xmm xmm
// VPABSD m128 xmm
// VPABSD ymm ymm
// VPABSD m256 ymm
// Construct and append a VPABSD instruction to the active function.
// Operates on the global context.
func VPABSD(mxy, xy operand.Op) { ctx.VPABSD(mxy, xy) }

// VPABSW: Packed Absolute Value of Word Integers.
//
// Forms:
//
// VPABSW xmm xmm
// VPABSW m128 xmm
// VPABSW ymm ymm
// VPABSW m256 ymm
// Construct and append a VPABSW instruction to the active function.
func (c *Context) VPABSW(mxy, xy operand.Op) {
	if inst, err := x86.VPABSW(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPABSW: Packed Absolute Value of Word Integers.
//
// Forms:
//
// VPABSW xmm xmm
// VPABSW m128 xmm
// VPABSW ymm ymm
// VPABSW m256 ymm
// Construct and append a VPABSW instruction to the active function.
// Operates on the global context.
func VPABSW(mxy, xy operand.Op) { ctx.VPABSW(mxy, xy) }

// VPACKSSDW: Pack Doublewords into Words with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKSSDW xmm xmm xmm
|
|
// VPACKSSDW m128 xmm xmm
|
|
// VPACKSSDW ymm ymm ymm
|
|
// VPACKSSDW m256 ymm ymm
|
|
// Construct and append a VPACKSSDW instruction to the active function.
|
|
func (c *Context) VPACKSSDW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPACKSSDW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPACKSSDW: Pack Doublewords into Words with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKSSDW xmm xmm xmm
|
|
// VPACKSSDW m128 xmm xmm
|
|
// VPACKSSDW ymm ymm ymm
|
|
// VPACKSSDW m256 ymm ymm
|
|
// Construct and append a VPACKSSDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPACKSSDW(mxy, xy, xy1 operand.Op) { ctx.VPACKSSDW(mxy, xy, xy1) }
|
|
|
|
// VPACKSSWB: Pack Words into Bytes with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKSSWB xmm xmm xmm
|
|
// VPACKSSWB m128 xmm xmm
|
|
// VPACKSSWB ymm ymm ymm
|
|
// VPACKSSWB m256 ymm ymm
|
|
// Construct and append a VPACKSSWB instruction to the active function.
|
|
func (c *Context) VPACKSSWB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPACKSSWB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPACKSSWB: Pack Words into Bytes with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKSSWB xmm xmm xmm
|
|
// VPACKSSWB m128 xmm xmm
|
|
// VPACKSSWB ymm ymm ymm
|
|
// VPACKSSWB m256 ymm ymm
|
|
// Construct and append a VPACKSSWB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPACKSSWB(mxy, xy, xy1 operand.Op) { ctx.VPACKSSWB(mxy, xy, xy1) }
|
|
|
|
// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKUSDW xmm xmm xmm
|
|
// VPACKUSDW m128 xmm xmm
|
|
// VPACKUSDW ymm ymm ymm
|
|
// VPACKUSDW m256 ymm ymm
|
|
// Construct and append a VPACKUSDW instruction to the active function.
|
|
func (c *Context) VPACKUSDW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPACKUSDW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKUSDW xmm xmm xmm
|
|
// VPACKUSDW m128 xmm xmm
|
|
// VPACKUSDW ymm ymm ymm
|
|
// VPACKUSDW m256 ymm ymm
|
|
// Construct and append a VPACKUSDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPACKUSDW(mxy, xy, xy1 operand.Op) { ctx.VPACKUSDW(mxy, xy, xy1) }
|
|
|
|
// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKUSWB xmm xmm xmm
|
|
// VPACKUSWB m128 xmm xmm
|
|
// VPACKUSWB ymm ymm ymm
|
|
// VPACKUSWB m256 ymm ymm
|
|
// Construct and append a VPACKUSWB instruction to the active function.
|
|
func (c *Context) VPACKUSWB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPACKUSWB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPACKUSWB xmm xmm xmm
|
|
// VPACKUSWB m128 xmm xmm
|
|
// VPACKUSWB ymm ymm ymm
|
|
// VPACKUSWB m256 ymm ymm
|
|
// Construct and append a VPACKUSWB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPACKUSWB(mxy, xy, xy1 operand.Op) { ctx.VPACKUSWB(mxy, xy, xy1) }
|
|
|
|
// VPADDB: Add Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDB xmm xmm xmm
|
|
// VPADDB m128 xmm xmm
|
|
// VPADDB ymm ymm ymm
|
|
// VPADDB m256 ymm ymm
|
|
// Construct and append a VPADDB instruction to the active function.
|
|
func (c *Context) VPADDB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDB: Add Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDB xmm xmm xmm
|
|
// VPADDB m128 xmm xmm
|
|
// VPADDB ymm ymm ymm
|
|
// VPADDB m256 ymm ymm
|
|
// Construct and append a VPADDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDB(mxy, xy, xy1 operand.Op) { ctx.VPADDB(mxy, xy, xy1) }
|
|
|
|
// VPADDD: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDD xmm xmm xmm
|
|
// VPADDD m128 xmm xmm
|
|
// VPADDD ymm ymm ymm
|
|
// VPADDD m256 ymm ymm
|
|
// Construct and append a VPADDD instruction to the active function.
|
|
func (c *Context) VPADDD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDD: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDD xmm xmm xmm
|
|
// VPADDD m128 xmm xmm
|
|
// VPADDD ymm ymm ymm
|
|
// VPADDD m256 ymm ymm
|
|
// Construct and append a VPADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDD(mxy, xy, xy1 operand.Op) { ctx.VPADDD(mxy, xy, xy1) }
|
|
|
|
// VPADDQ: Add Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDQ xmm xmm xmm
|
|
// VPADDQ m128 xmm xmm
|
|
// VPADDQ ymm ymm ymm
|
|
// VPADDQ m256 ymm ymm
|
|
// Construct and append a VPADDQ instruction to the active function.
|
|
func (c *Context) VPADDQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDQ: Add Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDQ xmm xmm xmm
|
|
// VPADDQ m128 xmm xmm
|
|
// VPADDQ ymm ymm ymm
|
|
// VPADDQ m256 ymm ymm
|
|
// Construct and append a VPADDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDQ(mxy, xy, xy1 operand.Op) { ctx.VPADDQ(mxy, xy, xy1) }
|
|
|
|
// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDSB xmm xmm xmm
|
|
// VPADDSB m128 xmm xmm
|
|
// VPADDSB ymm ymm ymm
|
|
// VPADDSB m256 ymm ymm
|
|
// Construct and append a VPADDSB instruction to the active function.
|
|
func (c *Context) VPADDSB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDSB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDSB xmm xmm xmm
|
|
// VPADDSB m128 xmm xmm
|
|
// VPADDSB ymm ymm ymm
|
|
// VPADDSB m256 ymm ymm
|
|
// Construct and append a VPADDSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDSB(mxy, xy, xy1 operand.Op) { ctx.VPADDSB(mxy, xy, xy1) }
|
|
|
|
// VPADDSW: Add Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDSW xmm xmm xmm
|
|
// VPADDSW m128 xmm xmm
|
|
// VPADDSW ymm ymm ymm
|
|
// VPADDSW m256 ymm ymm
|
|
// Construct and append a VPADDSW instruction to the active function.
|
|
func (c *Context) VPADDSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDSW: Add Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDSW xmm xmm xmm
|
|
// VPADDSW m128 xmm xmm
|
|
// VPADDSW ymm ymm ymm
|
|
// VPADDSW m256 ymm ymm
|
|
// Construct and append a VPADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDSW(mxy, xy, xy1 operand.Op) { ctx.VPADDSW(mxy, xy, xy1) }
|
|
|
|
// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDUSB xmm xmm xmm
|
|
// VPADDUSB m128 xmm xmm
|
|
// VPADDUSB ymm ymm ymm
|
|
// VPADDUSB m256 ymm ymm
|
|
// Construct and append a VPADDUSB instruction to the active function.
|
|
func (c *Context) VPADDUSB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDUSB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDUSB xmm xmm xmm
|
|
// VPADDUSB m128 xmm xmm
|
|
// VPADDUSB ymm ymm ymm
|
|
// VPADDUSB m256 ymm ymm
|
|
// Construct and append a VPADDUSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDUSB(mxy, xy, xy1 operand.Op) { ctx.VPADDUSB(mxy, xy, xy1) }
|
|
|
|
// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDUSW xmm xmm xmm
|
|
// VPADDUSW m128 xmm xmm
|
|
// VPADDUSW ymm ymm ymm
|
|
// VPADDUSW m256 ymm ymm
|
|
// Construct and append a VPADDUSW instruction to the active function.
|
|
func (c *Context) VPADDUSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDUSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDUSW xmm xmm xmm
|
|
// VPADDUSW m128 xmm xmm
|
|
// VPADDUSW ymm ymm ymm
|
|
// VPADDUSW m256 ymm ymm
|
|
// Construct and append a VPADDUSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDUSW(mxy, xy, xy1 operand.Op) { ctx.VPADDUSW(mxy, xy, xy1) }
|
|
|
|
// VPADDW: Add Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDW xmm xmm xmm
|
|
// VPADDW m128 xmm xmm
|
|
// VPADDW ymm ymm ymm
|
|
// VPADDW m256 ymm ymm
|
|
// Construct and append a VPADDW instruction to the active function.
|
|
func (c *Context) VPADDW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPADDW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPADDW: Add Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPADDW xmm xmm xmm
|
|
// VPADDW m128 xmm xmm
|
|
// VPADDW ymm ymm ymm
|
|
// VPADDW m256 ymm ymm
|
|
// Construct and append a VPADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPADDW(mxy, xy, xy1 operand.Op) { ctx.VPADDW(mxy, xy, xy1) }
|
|
|
|
// VPALIGNR: Packed Align Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPALIGNR imm8 xmm xmm xmm
|
|
// VPALIGNR imm8 m128 xmm xmm
|
|
// VPALIGNR imm8 ymm ymm ymm
|
|
// VPALIGNR imm8 m256 ymm ymm
|
|
// Construct and append a VPALIGNR instruction to the active function.
|
|
func (c *Context) VPALIGNR(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPALIGNR(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPALIGNR: Packed Align Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPALIGNR imm8 xmm xmm xmm
|
|
// VPALIGNR imm8 m128 xmm xmm
|
|
// VPALIGNR imm8 ymm ymm ymm
|
|
// VPALIGNR imm8 m256 ymm ymm
|
|
// Construct and append a VPALIGNR instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPALIGNR(i, mxy, xy, xy1 operand.Op) { ctx.VPALIGNR(i, mxy, xy, xy1) }
|
|
|
|
// VPAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAND xmm xmm xmm
|
|
// VPAND m128 xmm xmm
|
|
// VPAND ymm ymm ymm
|
|
// VPAND m256 ymm ymm
|
|
// Construct and append a VPAND instruction to the active function.
|
|
func (c *Context) VPAND(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPAND(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAND xmm xmm xmm
|
|
// VPAND m128 xmm xmm
|
|
// VPAND ymm ymm ymm
|
|
// VPAND m256 ymm ymm
|
|
// Construct and append a VPAND instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAND(mxy, xy, xy1 operand.Op) { ctx.VPAND(mxy, xy, xy1) }
|
|
|
|
// VPANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDN xmm xmm xmm
|
|
// VPANDN m128 xmm xmm
|
|
// VPANDN ymm ymm ymm
|
|
// VPANDN m256 ymm ymm
|
|
// Construct and append a VPANDN instruction to the active function.
|
|
func (c *Context) VPANDN(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPANDN(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDN xmm xmm xmm
|
|
// VPANDN m128 xmm xmm
|
|
// VPANDN ymm ymm ymm
|
|
// VPANDN m256 ymm ymm
|
|
// Construct and append a VPANDN instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDN(mxy, xy, xy1 operand.Op) { ctx.VPANDN(mxy, xy, xy1) }
|
|
|
|
// VPAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGB xmm xmm xmm
|
|
// VPAVGB m128 xmm xmm
|
|
// VPAVGB ymm ymm ymm
|
|
// VPAVGB m256 ymm ymm
|
|
// Construct and append a VPAVGB instruction to the active function.
|
|
func (c *Context) VPAVGB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPAVGB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGB xmm xmm xmm
|
|
// VPAVGB m128 xmm xmm
|
|
// VPAVGB ymm ymm ymm
|
|
// VPAVGB m256 ymm ymm
|
|
// Construct and append a VPAVGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAVGB(mxy, xy, xy1 operand.Op) { ctx.VPAVGB(mxy, xy, xy1) }
|
|
|
|
// VPAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGW xmm xmm xmm
|
|
// VPAVGW m128 xmm xmm
|
|
// VPAVGW ymm ymm ymm
|
|
// VPAVGW m256 ymm ymm
|
|
// Construct and append a VPAVGW instruction to the active function.
|
|
func (c *Context) VPAVGW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPAVGW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGW xmm xmm xmm
|
|
// VPAVGW m128 xmm xmm
|
|
// VPAVGW ymm ymm ymm
|
|
// VPAVGW m256 ymm ymm
|
|
// Construct and append a VPAVGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAVGW(mxy, xy, xy1 operand.Op) { ctx.VPAVGW(mxy, xy, xy1) }
|
|
|
|
// VPBLENDD: Blend Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDD imm8 xmm xmm xmm
|
|
// VPBLENDD imm8 m128 xmm xmm
|
|
// VPBLENDD imm8 ymm ymm ymm
|
|
// VPBLENDD imm8 m256 ymm ymm
|
|
// Construct and append a VPBLENDD instruction to the active function.
|
|
func (c *Context) VPBLENDD(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPBLENDD(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBLENDD: Blend Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDD imm8 xmm xmm xmm
|
|
// VPBLENDD imm8 m128 xmm xmm
|
|
// VPBLENDD imm8 ymm ymm ymm
|
|
// VPBLENDD imm8 m256 ymm ymm
|
|
// Construct and append a VPBLENDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDD(i, mxy, xy, xy1 operand.Op) { ctx.VPBLENDD(i, mxy, xy, xy1) }
|
|
|
|
// VPBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDVB xmm xmm xmm xmm
|
|
// VPBLENDVB xmm m128 xmm xmm
|
|
// VPBLENDVB ymm ymm ymm ymm
|
|
// VPBLENDVB ymm m256 ymm ymm
|
|
// Construct and append a VPBLENDVB instruction to the active function.
|
|
func (c *Context) VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) {
|
|
if inst, err := x86.VPBLENDVB(xy, mxy, xy1, xy2); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDVB xmm xmm xmm xmm
|
|
// VPBLENDVB xmm m128 xmm xmm
|
|
// VPBLENDVB ymm ymm ymm ymm
|
|
// VPBLENDVB ymm m256 ymm ymm
|
|
// Construct and append a VPBLENDVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) { ctx.VPBLENDVB(xy, mxy, xy1, xy2) }
|
|
|
|
// VPBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDW imm8 xmm xmm xmm
|
|
// VPBLENDW imm8 m128 xmm xmm
|
|
// VPBLENDW imm8 ymm ymm ymm
|
|
// VPBLENDW imm8 m256 ymm ymm
|
|
// Construct and append a VPBLENDW instruction to the active function.
|
|
func (c *Context) VPBLENDW(i, mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPBLENDW(i, mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDW imm8 xmm xmm xmm
|
|
// VPBLENDW imm8 m128 xmm xmm
|
|
// VPBLENDW imm8 ymm ymm ymm
|
|
// VPBLENDW imm8 m256 ymm ymm
|
|
// Construct and append a VPBLENDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDW(i, mxy, xy, xy1 operand.Op) { ctx.VPBLENDW(i, mxy, xy, xy1) }
|
|
|
|
// VPBROADCASTB: Broadcast Byte Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTB xmm xmm
|
|
// VPBROADCASTB m8 xmm
|
|
// VPBROADCASTB xmm ymm
|
|
// VPBROADCASTB m8 ymm
|
|
// Construct and append a VPBROADCASTB instruction to the active function.
|
|
func (c *Context) VPBROADCASTB(mx, xy operand.Op) {
|
|
if inst, err := x86.VPBROADCASTB(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBROADCASTB: Broadcast Byte Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTB xmm xmm
|
|
// VPBROADCASTB m8 xmm
|
|
// VPBROADCASTB xmm ymm
|
|
// VPBROADCASTB m8 ymm
|
|
// Construct and append a VPBROADCASTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTB(mx, xy operand.Op) { ctx.VPBROADCASTB(mx, xy) }
|
|
|
|
// VPBROADCASTD: Broadcast Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTD xmm xmm
|
|
// VPBROADCASTD m32 xmm
|
|
// VPBROADCASTD xmm ymm
|
|
// VPBROADCASTD m32 ymm
|
|
// Construct and append a VPBROADCASTD instruction to the active function.
|
|
func (c *Context) VPBROADCASTD(mx, xy operand.Op) {
|
|
if inst, err := x86.VPBROADCASTD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBROADCASTD: Broadcast Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTD xmm xmm
|
|
// VPBROADCASTD m32 xmm
|
|
// VPBROADCASTD xmm ymm
|
|
// VPBROADCASTD m32 ymm
|
|
// Construct and append a VPBROADCASTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTD(mx, xy operand.Op) { ctx.VPBROADCASTD(mx, xy) }
|
|
|
|
// VPBROADCASTQ: Broadcast Quadword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTQ xmm xmm
|
|
// VPBROADCASTQ m64 xmm
|
|
// VPBROADCASTQ xmm ymm
|
|
// VPBROADCASTQ m64 ymm
|
|
// Construct and append a VPBROADCASTQ instruction to the active function.
|
|
func (c *Context) VPBROADCASTQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPBROADCASTQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBROADCASTQ: Broadcast Quadword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTQ xmm xmm
|
|
// VPBROADCASTQ m64 xmm
|
|
// VPBROADCASTQ xmm ymm
|
|
// VPBROADCASTQ m64 ymm
|
|
// Construct and append a VPBROADCASTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTQ(mx, xy operand.Op) { ctx.VPBROADCASTQ(mx, xy) }
|
|
|
|
// VPBROADCASTW: Broadcast Word Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTW xmm xmm
|
|
// VPBROADCASTW m16 xmm
|
|
// VPBROADCASTW xmm ymm
|
|
// VPBROADCASTW m16 ymm
|
|
// Construct and append a VPBROADCASTW instruction to the active function.
|
|
func (c *Context) VPBROADCASTW(mx, xy operand.Op) {
|
|
if inst, err := x86.VPBROADCASTW(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPBROADCASTW: Broadcast Word Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTW xmm xmm
|
|
// VPBROADCASTW m16 xmm
|
|
// VPBROADCASTW xmm ymm
|
|
// VPBROADCASTW m16 ymm
|
|
// Construct and append a VPBROADCASTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTW(mx, xy operand.Op) { ctx.VPBROADCASTW(mx, xy) }
|
|
|
|
// VPCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCLMULQDQ imm8 xmm xmm xmm
|
|
// VPCLMULQDQ imm8 m128 xmm xmm
|
|
// Construct and append a VPCLMULQDQ instruction to the active function.
|
|
func (c *Context) VPCLMULQDQ(i, mx, x, x1 operand.Op) {
|
|
if inst, err := x86.VPCLMULQDQ(i, mx, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCLMULQDQ imm8 xmm xmm xmm
|
|
// VPCLMULQDQ imm8 m128 xmm xmm
|
|
// Construct and append a VPCLMULQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCLMULQDQ(i, mx, x, x1 operand.Op) { ctx.VPCLMULQDQ(i, mx, x, x1) }
|
|
|
|
// VPCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQB xmm xmm xmm
|
|
// VPCMPEQB m128 xmm xmm
|
|
// VPCMPEQB ymm ymm ymm
|
|
// VPCMPEQB m256 ymm ymm
|
|
// Construct and append a VPCMPEQB instruction to the active function.
|
|
func (c *Context) VPCMPEQB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPEQB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQB xmm xmm xmm
|
|
// VPCMPEQB m128 xmm xmm
|
|
// VPCMPEQB ymm ymm ymm
|
|
// VPCMPEQB m256 ymm ymm
|
|
// Construct and append a VPCMPEQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQB(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQB(mxy, xy, xy1) }
|
|
|
|
// VPCMPEQD: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQD xmm xmm xmm
|
|
// VPCMPEQD m128 xmm xmm
|
|
// VPCMPEQD ymm ymm ymm
|
|
// VPCMPEQD m256 ymm ymm
|
|
// Construct and append a VPCMPEQD instruction to the active function.
|
|
func (c *Context) VPCMPEQD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPEQD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPEQD: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQD xmm xmm xmm
|
|
// VPCMPEQD m128 xmm xmm
|
|
// VPCMPEQD ymm ymm ymm
|
|
// VPCMPEQD m256 ymm ymm
|
|
// Construct and append a VPCMPEQD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQD(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQD(mxy, xy, xy1) }
|
|
|
|
// VPCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQQ xmm xmm xmm
|
|
// VPCMPEQQ m128 xmm xmm
|
|
// VPCMPEQQ ymm ymm ymm
|
|
// VPCMPEQQ m256 ymm ymm
|
|
// Construct and append a VPCMPEQQ instruction to the active function.
|
|
func (c *Context) VPCMPEQQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPEQQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQQ xmm xmm xmm
|
|
// VPCMPEQQ m128 xmm xmm
|
|
// VPCMPEQQ ymm ymm ymm
|
|
// VPCMPEQQ m256 ymm ymm
|
|
// Construct and append a VPCMPEQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQQ(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQQ(mxy, xy, xy1) }
|
|
|
|
// VPCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQW xmm xmm xmm
|
|
// VPCMPEQW m128 xmm xmm
|
|
// VPCMPEQW ymm ymm ymm
|
|
// VPCMPEQW m256 ymm ymm
|
|
// Construct and append a VPCMPEQW instruction to the active function.
|
|
func (c *Context) VPCMPEQW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPEQW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQW xmm xmm xmm
|
|
// VPCMPEQW m128 xmm xmm
|
|
// VPCMPEQW ymm ymm ymm
|
|
// VPCMPEQW m256 ymm ymm
|
|
// Construct and append a VPCMPEQW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQW(mxy, xy, xy1 operand.Op) { ctx.VPCMPEQW(mxy, xy, xy1) }
|
|
|
|
// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRI imm8 xmm xmm
|
|
// VPCMPESTRI imm8 m128 xmm
|
|
// Construct and append a VPCMPESTRI instruction to the active function.
|
|
func (c *Context) VPCMPESTRI(i, mx, x operand.Op) {
|
|
if inst, err := x86.VPCMPESTRI(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRI imm8 xmm xmm
|
|
// VPCMPESTRI imm8 m128 xmm
|
|
// Construct and append a VPCMPESTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPESTRI(i, mx, x operand.Op) { ctx.VPCMPESTRI(i, mx, x) }
|
|
|
|
// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRM imm8 xmm xmm
|
|
// VPCMPESTRM imm8 m128 xmm
|
|
// Construct and append a VPCMPESTRM instruction to the active function.
|
|
func (c *Context) VPCMPESTRM(i, mx, x operand.Op) {
|
|
if inst, err := x86.VPCMPESTRM(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRM imm8 xmm xmm
|
|
// VPCMPESTRM imm8 m128 xmm
|
|
// Construct and append a VPCMPESTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPESTRM(i, mx, x operand.Op) { ctx.VPCMPESTRM(i, mx, x) }
|
|
|
|
// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTB xmm xmm xmm
|
|
// VPCMPGTB m128 xmm xmm
|
|
// VPCMPGTB ymm ymm ymm
|
|
// VPCMPGTB m256 ymm ymm
|
|
// Construct and append a VPCMPGTB instruction to the active function.
|
|
func (c *Context) VPCMPGTB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPGTB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTB xmm xmm xmm
|
|
// VPCMPGTB m128 xmm xmm
|
|
// VPCMPGTB ymm ymm ymm
|
|
// VPCMPGTB m256 ymm ymm
|
|
// Construct and append a VPCMPGTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTB(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTB(mxy, xy, xy1) }
|
|
|
|
// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTD xmm xmm xmm
|
|
// VPCMPGTD m128 xmm xmm
|
|
// VPCMPGTD ymm ymm ymm
|
|
// VPCMPGTD m256 ymm ymm
|
|
// Construct and append a VPCMPGTD instruction to the active function.
|
|
func (c *Context) VPCMPGTD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPGTD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTD xmm xmm xmm
|
|
// VPCMPGTD m128 xmm xmm
|
|
// VPCMPGTD ymm ymm ymm
|
|
// VPCMPGTD m256 ymm ymm
|
|
// Construct and append a VPCMPGTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTD(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTD(mxy, xy, xy1) }
|
|
|
|
// VPCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTQ xmm xmm xmm
|
|
// VPCMPGTQ m128 xmm xmm
|
|
// VPCMPGTQ ymm ymm ymm
|
|
// VPCMPGTQ m256 ymm ymm
|
|
// Construct and append a VPCMPGTQ instruction to the active function.
|
|
func (c *Context) VPCMPGTQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPGTQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTQ xmm xmm xmm
|
|
// VPCMPGTQ m128 xmm xmm
|
|
// VPCMPGTQ ymm ymm ymm
|
|
// VPCMPGTQ m256 ymm ymm
|
|
// Construct and append a VPCMPGTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTQ(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTQ(mxy, xy, xy1) }
|
|
|
|
// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTW xmm xmm xmm
|
|
// VPCMPGTW m128 xmm xmm
|
|
// VPCMPGTW ymm ymm ymm
|
|
// VPCMPGTW m256 ymm ymm
|
|
// Construct and append a VPCMPGTW instruction to the active function.
|
|
func (c *Context) VPCMPGTW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPCMPGTW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTW xmm xmm xmm
|
|
// VPCMPGTW m128 xmm xmm
|
|
// VPCMPGTW ymm ymm ymm
|
|
// VPCMPGTW m256 ymm ymm
|
|
// Construct and append a VPCMPGTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTW(mxy, xy, xy1 operand.Op) { ctx.VPCMPGTW(mxy, xy, xy1) }
|
|
|
|
// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRI imm8 xmm xmm
|
|
// VPCMPISTRI imm8 m128 xmm
|
|
// Construct and append a VPCMPISTRI instruction to the active function.
|
|
func (c *Context) VPCMPISTRI(i, mx, x operand.Op) {
|
|
if inst, err := x86.VPCMPISTRI(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRI imm8 xmm xmm
|
|
// VPCMPISTRI imm8 m128 xmm
|
|
// Construct and append a VPCMPISTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPISTRI(i, mx, x operand.Op) { ctx.VPCMPISTRI(i, mx, x) }
|
|
|
|
// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRM imm8 xmm xmm
|
|
// VPCMPISTRM imm8 m128 xmm
|
|
// Construct and append a VPCMPISTRM instruction to the active function.
|
|
func (c *Context) VPCMPISTRM(i, mx, x operand.Op) {
|
|
if inst, err := x86.VPCMPISTRM(i, mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRM imm8 xmm xmm
|
|
// VPCMPISTRM imm8 m128 xmm
|
|
// Construct and append a VPCMPISTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPISTRM(i, mx, x operand.Op) { ctx.VPCMPISTRM(i, mx, x) }
|
|
|
|
// VPERM2F128: Permute Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2F128 imm8 ymm ymm ymm
|
|
// VPERM2F128 imm8 m256 ymm ymm
|
|
// Construct and append a VPERM2F128 instruction to the active function.
|
|
func (c *Context) VPERM2F128(i, my, y, y1 operand.Op) {
|
|
if inst, err := x86.VPERM2F128(i, my, y, y1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERM2F128: Permute Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2F128 imm8 ymm ymm ymm
|
|
// VPERM2F128 imm8 m256 ymm ymm
|
|
// Construct and append a VPERM2F128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERM2F128(i, my, y, y1 operand.Op) { ctx.VPERM2F128(i, my, y, y1) }
|
|
|
|
// VPERM2I128: Permute 128-Bit Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2I128 imm8 ymm ymm ymm
|
|
// VPERM2I128 imm8 m256 ymm ymm
|
|
// Construct and append a VPERM2I128 instruction to the active function.
|
|
func (c *Context) VPERM2I128(i, my, y, y1 operand.Op) {
|
|
if inst, err := x86.VPERM2I128(i, my, y, y1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERM2I128: Permute 128-Bit Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2I128 imm8 ymm ymm ymm
|
|
// VPERM2I128 imm8 m256 ymm ymm
|
|
// Construct and append a VPERM2I128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERM2I128(i, my, y, y1 operand.Op) { ctx.VPERM2I128(i, my, y, y1) }
|
|
|
|
// VPERMD: Permute Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD ymm ymm ymm
|
|
// VPERMD m256 ymm ymm
|
|
// Construct and append a VPERMD instruction to the active function.
|
|
func (c *Context) VPERMD(my, y, y1 operand.Op) {
|
|
if inst, err := x86.VPERMD(my, y, y1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERMD: Permute Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD ymm ymm ymm
|
|
// VPERMD m256 ymm ymm
|
|
// Construct and append a VPERMD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMD(my, y, y1 operand.Op) { ctx.VPERMD(my, y, y1) }
|
|
|
|
// VPERMILPD: Permute Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD imm8 xmm xmm
|
|
// VPERMILPD xmm xmm xmm
|
|
// VPERMILPD m128 xmm xmm
|
|
// VPERMILPD imm8 m128 xmm
|
|
// VPERMILPD imm8 ymm ymm
|
|
// VPERMILPD ymm ymm ymm
|
|
// VPERMILPD m256 ymm ymm
|
|
// VPERMILPD imm8 m256 ymm
|
|
// Construct and append a VPERMILPD instruction to the active function.
|
|
func (c *Context) VPERMILPD(imxy, mxy, xy operand.Op) {
|
|
if inst, err := x86.VPERMILPD(imxy, mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERMILPD: Permute Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD imm8 xmm xmm
|
|
// VPERMILPD xmm xmm xmm
|
|
// VPERMILPD m128 xmm xmm
|
|
// VPERMILPD imm8 m128 xmm
|
|
// VPERMILPD imm8 ymm ymm
|
|
// VPERMILPD ymm ymm ymm
|
|
// VPERMILPD m256 ymm ymm
|
|
// VPERMILPD imm8 m256 ymm
|
|
// Construct and append a VPERMILPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPD(imxy, mxy, xy operand.Op) { ctx.VPERMILPD(imxy, mxy, xy) }
|
|
|
|
// VPERMILPS: Permute Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS imm8 xmm xmm
|
|
// VPERMILPS xmm xmm xmm
|
|
// VPERMILPS m128 xmm xmm
|
|
// VPERMILPS imm8 m128 xmm
|
|
// VPERMILPS imm8 ymm ymm
|
|
// VPERMILPS ymm ymm ymm
|
|
// VPERMILPS m256 ymm ymm
|
|
// VPERMILPS imm8 m256 ymm
|
|
// Construct and append a VPERMILPS instruction to the active function.
|
|
func (c *Context) VPERMILPS(imxy, mxy, xy operand.Op) {
|
|
if inst, err := x86.VPERMILPS(imxy, mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERMILPS: Permute Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS imm8 xmm xmm
|
|
// VPERMILPS xmm xmm xmm
|
|
// VPERMILPS m128 xmm xmm
|
|
// VPERMILPS imm8 m128 xmm
|
|
// VPERMILPS imm8 ymm ymm
|
|
// VPERMILPS ymm ymm ymm
|
|
// VPERMILPS m256 ymm ymm
|
|
// VPERMILPS imm8 m256 ymm
|
|
// Construct and append a VPERMILPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPS(imxy, mxy, xy operand.Op) { ctx.VPERMILPS(imxy, mxy, xy) }
|
|
|
|
// VPERMPD: Permute Double-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD imm8 ymm ymm
|
|
// VPERMPD imm8 m256 ymm
|
|
// Construct and append a VPERMPD instruction to the active function.
|
|
func (c *Context) VPERMPD(i, my, y operand.Op) {
|
|
if inst, err := x86.VPERMPD(i, my, y); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERMPD: Permute Double-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD imm8 ymm ymm
|
|
// VPERMPD imm8 m256 ymm
|
|
// Construct and append a VPERMPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPD(i, my, y operand.Op) { ctx.VPERMPD(i, my, y) }
|
|
|
|
// VPERMPS: Permute Single-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS ymm ymm ymm
|
|
// VPERMPS m256 ymm ymm
|
|
// Construct and append a VPERMPS instruction to the active function.
|
|
func (c *Context) VPERMPS(my, y, y1 operand.Op) {
|
|
if inst, err := x86.VPERMPS(my, y, y1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERMPS: Permute Single-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS ymm ymm ymm
|
|
// VPERMPS m256 ymm ymm
|
|
// Construct and append a VPERMPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPS(my, y, y1 operand.Op) { ctx.VPERMPS(my, y, y1) }
|
|
|
|
// VPERMQ: Permute Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ imm8 ymm ymm
|
|
// VPERMQ imm8 m256 ymm
|
|
// Construct and append a VPERMQ instruction to the active function.
|
|
func (c *Context) VPERMQ(i, my, y operand.Op) {
|
|
if inst, err := x86.VPERMQ(i, my, y); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPERMQ: Permute Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ imm8 ymm ymm
|
|
// VPERMQ imm8 m256 ymm
|
|
// Construct and append a VPERMQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMQ(i, my, y operand.Op) { ctx.VPERMQ(i, my, y) }
|
|
|
|
// VPEXTRB: Extract Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRB imm8 xmm r32
|
|
// VPEXTRB imm8 xmm m8
|
|
// Construct and append a VPEXTRB instruction to the active function.
|
|
func (c *Context) VPEXTRB(i, x, mr operand.Op) {
|
|
if inst, err := x86.VPEXTRB(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPEXTRB: Extract Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRB imm8 xmm r32
|
|
// VPEXTRB imm8 xmm m8
|
|
// Construct and append a VPEXTRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPEXTRB(i, x, mr operand.Op) { ctx.VPEXTRB(i, x, mr) }
|
|
|
|
// VPEXTRD: Extract Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRD imm8 xmm r32
|
|
// VPEXTRD imm8 xmm m32
|
|
// Construct and append a VPEXTRD instruction to the active function.
|
|
func (c *Context) VPEXTRD(i, x, mr operand.Op) {
|
|
if inst, err := x86.VPEXTRD(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPEXTRD: Extract Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRD imm8 xmm r32
|
|
// VPEXTRD imm8 xmm m32
|
|
// Construct and append a VPEXTRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPEXTRD(i, x, mr operand.Op) { ctx.VPEXTRD(i, x, mr) }
|
|
|
|
// VPEXTRQ: Extract Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRQ imm8 xmm r64
|
|
// VPEXTRQ imm8 xmm m64
|
|
// Construct and append a VPEXTRQ instruction to the active function.
|
|
func (c *Context) VPEXTRQ(i, x, mr operand.Op) {
|
|
if inst, err := x86.VPEXTRQ(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPEXTRQ: Extract Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRQ imm8 xmm r64
|
|
// VPEXTRQ imm8 xmm m64
|
|
// Construct and append a VPEXTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPEXTRQ(i, x, mr operand.Op) { ctx.VPEXTRQ(i, x, mr) }
|
|
|
|
// VPEXTRW: Extract Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRW imm8 xmm r32
|
|
// VPEXTRW imm8 xmm m16
|
|
// Construct and append a VPEXTRW instruction to the active function.
|
|
func (c *Context) VPEXTRW(i, x, mr operand.Op) {
|
|
if inst, err := x86.VPEXTRW(i, x, mr); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPEXTRW: Extract Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPEXTRW imm8 xmm r32
|
|
// VPEXTRW imm8 xmm m16
|
|
// Construct and append a VPEXTRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPEXTRW(i, x, mr operand.Op) { ctx.VPEXTRW(i, x, mr) }
|
|
|
|
// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERDD xmm vm32x xmm
|
|
// VPGATHERDD ymm vm32y ymm
|
|
// Construct and append a VPGATHERDD instruction to the active function.
|
|
func (c *Context) VPGATHERDD(xy, v, xy1 operand.Op) {
|
|
if inst, err := x86.VPGATHERDD(xy, v, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERDD xmm vm32x xmm
|
|
// VPGATHERDD ymm vm32y ymm
|
|
// Construct and append a VPGATHERDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPGATHERDD(xy, v, xy1 operand.Op) { ctx.VPGATHERDD(xy, v, xy1) }
|
|
|
|
// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERDQ xmm vm32x xmm
|
|
// VPGATHERDQ ymm vm32x ymm
|
|
// Construct and append a VPGATHERDQ instruction to the active function.
|
|
func (c *Context) VPGATHERDQ(xy, v, xy1 operand.Op) {
|
|
if inst, err := x86.VPGATHERDQ(xy, v, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERDQ xmm vm32x xmm
|
|
// VPGATHERDQ ymm vm32x ymm
|
|
// Construct and append a VPGATHERDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPGATHERDQ(xy, v, xy1 operand.Op) { ctx.VPGATHERDQ(xy, v, xy1) }
|
|
|
|
// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERQD xmm vm64x xmm
|
|
// VPGATHERQD xmm vm64y xmm
|
|
// Construct and append a VPGATHERQD instruction to the active function.
|
|
func (c *Context) VPGATHERQD(x, v, x1 operand.Op) {
|
|
if inst, err := x86.VPGATHERQD(x, v, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERQD xmm vm64x xmm
|
|
// VPGATHERQD xmm vm64y xmm
|
|
// Construct and append a VPGATHERQD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPGATHERQD(x, v, x1 operand.Op) { ctx.VPGATHERQD(x, v, x1) }
|
|
|
|
// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERQQ xmm vm64x xmm
|
|
// VPGATHERQQ ymm vm64y ymm
|
|
// Construct and append a VPGATHERQQ instruction to the active function.
|
|
func (c *Context) VPGATHERQQ(xy, v, xy1 operand.Op) {
|
|
if inst, err := x86.VPGATHERQQ(xy, v, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPGATHERQQ xmm vm64x xmm
|
|
// VPGATHERQQ ymm vm64y ymm
|
|
// Construct and append a VPGATHERQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPGATHERQQ(xy, v, xy1 operand.Op) { ctx.VPGATHERQQ(xy, v, xy1) }
|
|
|
|
// VPHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDD xmm xmm xmm
|
|
// VPHADDD m128 xmm xmm
|
|
// VPHADDD ymm ymm ymm
|
|
// VPHADDD m256 ymm ymm
|
|
// Construct and append a VPHADDD instruction to the active function.
|
|
func (c *Context) VPHADDD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPHADDD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDD xmm xmm xmm
|
|
// VPHADDD m128 xmm xmm
|
|
// VPHADDD ymm ymm ymm
|
|
// VPHADDD m256 ymm ymm
|
|
// Construct and append a VPHADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHADDD(mxy, xy, xy1 operand.Op) { ctx.VPHADDD(mxy, xy, xy1) }
|
|
|
|
// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDSW xmm xmm xmm
|
|
// VPHADDSW m128 xmm xmm
|
|
// VPHADDSW ymm ymm ymm
|
|
// VPHADDSW m256 ymm ymm
|
|
// Construct and append a VPHADDSW instruction to the active function.
|
|
func (c *Context) VPHADDSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPHADDSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDSW xmm xmm xmm
|
|
// VPHADDSW m128 xmm xmm
|
|
// VPHADDSW ymm ymm ymm
|
|
// VPHADDSW m256 ymm ymm
|
|
// Construct and append a VPHADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHADDSW(mxy, xy, xy1 operand.Op) { ctx.VPHADDSW(mxy, xy, xy1) }
|
|
|
|
// VPHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDW xmm xmm xmm
|
|
// VPHADDW m128 xmm xmm
|
|
// VPHADDW ymm ymm ymm
|
|
// VPHADDW m256 ymm ymm
|
|
// Construct and append a VPHADDW instruction to the active function.
|
|
func (c *Context) VPHADDW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPHADDW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDW xmm xmm xmm
|
|
// VPHADDW m128 xmm xmm
|
|
// VPHADDW ymm ymm ymm
|
|
// VPHADDW m256 ymm ymm
|
|
// Construct and append a VPHADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHADDW(mxy, xy, xy1 operand.Op) { ctx.VPHADDW(mxy, xy, xy1) }
|
|
|
|
// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHMINPOSUW xmm xmm
|
|
// VPHMINPOSUW m128 xmm
|
|
// Construct and append a VPHMINPOSUW instruction to the active function.
|
|
func (c *Context) VPHMINPOSUW(mx, x operand.Op) {
|
|
if inst, err := x86.VPHMINPOSUW(mx, x); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHMINPOSUW xmm xmm
|
|
// VPHMINPOSUW m128 xmm
|
|
// Construct and append a VPHMINPOSUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHMINPOSUW(mx, x operand.Op) { ctx.VPHMINPOSUW(mx, x) }
|
|
|
|
// VPHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBD xmm xmm xmm
|
|
// VPHSUBD m128 xmm xmm
|
|
// VPHSUBD ymm ymm ymm
|
|
// VPHSUBD m256 ymm ymm
|
|
// Construct and append a VPHSUBD instruction to the active function.
|
|
func (c *Context) VPHSUBD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPHSUBD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBD xmm xmm xmm
|
|
// VPHSUBD m128 xmm xmm
|
|
// VPHSUBD ymm ymm ymm
|
|
// VPHSUBD m256 ymm ymm
|
|
// Construct and append a VPHSUBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHSUBD(mxy, xy, xy1 operand.Op) { ctx.VPHSUBD(mxy, xy, xy1) }
|
|
|
|
// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBSW xmm xmm xmm
|
|
// VPHSUBSW m128 xmm xmm
|
|
// VPHSUBSW ymm ymm ymm
|
|
// VPHSUBSW m256 ymm ymm
|
|
// Construct and append a VPHSUBSW instruction to the active function.
|
|
func (c *Context) VPHSUBSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPHSUBSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBSW xmm xmm xmm
|
|
// VPHSUBSW m128 xmm xmm
|
|
// VPHSUBSW ymm ymm ymm
|
|
// VPHSUBSW m256 ymm ymm
|
|
// Construct and append a VPHSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHSUBSW(mxy, xy, xy1 operand.Op) { ctx.VPHSUBSW(mxy, xy, xy1) }
|
|
|
|
// VPHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBW xmm xmm xmm
|
|
// VPHSUBW m128 xmm xmm
|
|
// VPHSUBW ymm ymm ymm
|
|
// VPHSUBW m256 ymm ymm
|
|
// Construct and append a VPHSUBW instruction to the active function.
|
|
func (c *Context) VPHSUBW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPHSUBW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBW xmm xmm xmm
|
|
// VPHSUBW m128 xmm xmm
|
|
// VPHSUBW ymm ymm ymm
|
|
// VPHSUBW m256 ymm ymm
|
|
// Construct and append a VPHSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHSUBW(mxy, xy, xy1 operand.Op) { ctx.VPHSUBW(mxy, xy, xy1) }
|
|
|
|
// VPINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRB imm8 r32 xmm xmm
|
|
// VPINSRB imm8 m8 xmm xmm
|
|
// Construct and append a VPINSRB instruction to the active function.
|
|
func (c *Context) VPINSRB(i, mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VPINSRB(i, mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRB imm8 r32 xmm xmm
|
|
// VPINSRB imm8 m8 xmm xmm
|
|
// Construct and append a VPINSRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRB(i, mr, x, x1 operand.Op) { ctx.VPINSRB(i, mr, x, x1) }
|
|
|
|
// VPINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRD imm8 r32 xmm xmm
|
|
// VPINSRD imm8 m32 xmm xmm
|
|
// Construct and append a VPINSRD instruction to the active function.
|
|
func (c *Context) VPINSRD(i, mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VPINSRD(i, mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRD imm8 r32 xmm xmm
|
|
// VPINSRD imm8 m32 xmm xmm
|
|
// Construct and append a VPINSRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRD(i, mr, x, x1 operand.Op) { ctx.VPINSRD(i, mr, x, x1) }
|
|
|
|
// VPINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRQ imm8 r64 xmm xmm
|
|
// VPINSRQ imm8 m64 xmm xmm
|
|
// Construct and append a VPINSRQ instruction to the active function.
|
|
func (c *Context) VPINSRQ(i, mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VPINSRQ(i, mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRQ imm8 r64 xmm xmm
|
|
// VPINSRQ imm8 m64 xmm xmm
|
|
// Construct and append a VPINSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRQ(i, mr, x, x1 operand.Op) { ctx.VPINSRQ(i, mr, x, x1) }
|
|
|
|
// VPINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRW imm8 r32 xmm xmm
|
|
// VPINSRW imm8 m16 xmm xmm
|
|
// Construct and append a VPINSRW instruction to the active function.
|
|
func (c *Context) VPINSRW(i, mr, x, x1 operand.Op) {
|
|
if inst, err := x86.VPINSRW(i, mr, x, x1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRW imm8 r32 xmm xmm
|
|
// VPINSRW imm8 m16 xmm xmm
|
|
// Construct and append a VPINSRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRW(i, mr, x, x1 operand.Op) { ctx.VPINSRW(i, mr, x, x1) }
|
|
|
|
// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDUBSW xmm xmm xmm
|
|
// VPMADDUBSW m128 xmm xmm
|
|
// VPMADDUBSW ymm ymm ymm
|
|
// VPMADDUBSW m256 ymm ymm
|
|
// Construct and append a VPMADDUBSW instruction to the active function.
|
|
func (c *Context) VPMADDUBSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMADDUBSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDUBSW xmm xmm xmm
|
|
// VPMADDUBSW m128 xmm xmm
|
|
// VPMADDUBSW ymm ymm ymm
|
|
// VPMADDUBSW m256 ymm ymm
|
|
// Construct and append a VPMADDUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADDUBSW(mxy, xy, xy1 operand.Op) { ctx.VPMADDUBSW(mxy, xy, xy1) }
|
|
|
|
// VPMADDWD: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDWD xmm xmm xmm
|
|
// VPMADDWD m128 xmm xmm
|
|
// VPMADDWD ymm ymm ymm
|
|
// VPMADDWD m256 ymm ymm
|
|
// Construct and append a VPMADDWD instruction to the active function.
|
|
func (c *Context) VPMADDWD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMADDWD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMADDWD: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDWD xmm xmm xmm
|
|
// VPMADDWD m128 xmm xmm
|
|
// VPMADDWD ymm ymm ymm
|
|
// VPMADDWD m256 ymm ymm
|
|
// Construct and append a VPMADDWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADDWD(mxy, xy, xy1 operand.Op) { ctx.VPMADDWD(mxy, xy, xy1) }
|
|
|
|
// VPMASKMOVD: Conditional Move Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVD m128 xmm xmm
|
|
// VPMASKMOVD m256 ymm ymm
|
|
// VPMASKMOVD xmm xmm m128
|
|
// VPMASKMOVD ymm ymm m256
|
|
// Construct and append a VPMASKMOVD instruction to the active function.
|
|
func (c *Context) VPMASKMOVD(mxy, xy, mxy1 operand.Op) {
|
|
if inst, err := x86.VPMASKMOVD(mxy, xy, mxy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMASKMOVD: Conditional Move Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVD m128 xmm xmm
|
|
// VPMASKMOVD m256 ymm ymm
|
|
// VPMASKMOVD xmm xmm m128
|
|
// VPMASKMOVD ymm ymm m256
|
|
// Construct and append a VPMASKMOVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMASKMOVD(mxy, xy, mxy1 operand.Op) { ctx.VPMASKMOVD(mxy, xy, mxy1) }
|
|
|
|
// VPMASKMOVQ: Conditional Move Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVQ m128 xmm xmm
|
|
// VPMASKMOVQ m256 ymm ymm
|
|
// VPMASKMOVQ xmm xmm m128
|
|
// VPMASKMOVQ ymm ymm m256
|
|
// Construct and append a VPMASKMOVQ instruction to the active function.
|
|
func (c *Context) VPMASKMOVQ(mxy, xy, mxy1 operand.Op) {
|
|
if inst, err := x86.VPMASKMOVQ(mxy, xy, mxy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMASKMOVQ: Conditional Move Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVQ m128 xmm xmm
|
|
// VPMASKMOVQ m256 ymm ymm
|
|
// VPMASKMOVQ xmm xmm m128
|
|
// VPMASKMOVQ ymm ymm m256
|
|
// Construct and append a VPMASKMOVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMASKMOVQ(mxy, xy, mxy1 operand.Op) { ctx.VPMASKMOVQ(mxy, xy, mxy1) }
|
|
|
|
// VPMAXSB: Maximum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXSB xmm xmm xmm
|
|
// VPMAXSB m128 xmm xmm
|
|
// VPMAXSB ymm ymm ymm
|
|
// VPMAXSB m256 ymm ymm
|
|
// Construct and append a VPMAXSB instruction to the active function.
|
|
func (c *Context) VPMAXSB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMAXSB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMAXSB: Maximum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXSB xmm xmm xmm
|
|
// VPMAXSB m128 xmm xmm
|
|
// VPMAXSB ymm ymm ymm
|
|
// VPMAXSB m256 ymm ymm
|
|
// Construct and append a VPMAXSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMAXSB(mxy, xy, xy1 operand.Op) { ctx.VPMAXSB(mxy, xy, xy1) }
|
|
|
|
// VPMAXSD: Maximum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXSD xmm xmm xmm
|
|
// VPMAXSD m128 xmm xmm
|
|
// VPMAXSD ymm ymm ymm
|
|
// VPMAXSD m256 ymm ymm
|
|
// Construct and append a VPMAXSD instruction to the active function.
|
|
func (c *Context) VPMAXSD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMAXSD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMAXSD: Maximum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXSD xmm xmm xmm
|
|
// VPMAXSD m128 xmm xmm
|
|
// VPMAXSD ymm ymm ymm
|
|
// VPMAXSD m256 ymm ymm
|
|
// Construct and append a VPMAXSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMAXSD(mxy, xy, xy1 operand.Op) { ctx.VPMAXSD(mxy, xy, xy1) }
|
|
|
|
// VPMAXSW: Maximum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXSW xmm xmm xmm
|
|
// VPMAXSW m128 xmm xmm
|
|
// VPMAXSW ymm ymm ymm
|
|
// VPMAXSW m256 ymm ymm
|
|
// Construct and append a VPMAXSW instruction to the active function.
|
|
func (c *Context) VPMAXSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMAXSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMAXSW: Maximum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXSW xmm xmm xmm
|
|
// VPMAXSW m128 xmm xmm
|
|
// VPMAXSW ymm ymm ymm
|
|
// VPMAXSW m256 ymm ymm
|
|
// Construct and append a VPMAXSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMAXSW(mxy, xy, xy1 operand.Op) { ctx.VPMAXSW(mxy, xy, xy1) }
|
|
|
|
// VPMAXUB: Maximum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXUB xmm xmm xmm
|
|
// VPMAXUB m128 xmm xmm
|
|
// VPMAXUB ymm ymm ymm
|
|
// VPMAXUB m256 ymm ymm
|
|
// Construct and append a VPMAXUB instruction to the active function.
|
|
func (c *Context) VPMAXUB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMAXUB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMAXUB: Maximum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXUB xmm xmm xmm
|
|
// VPMAXUB m128 xmm xmm
|
|
// VPMAXUB ymm ymm ymm
|
|
// VPMAXUB m256 ymm ymm
|
|
// Construct and append a VPMAXUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMAXUB(mxy, xy, xy1 operand.Op) { ctx.VPMAXUB(mxy, xy, xy1) }
|
|
|
|
// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXUD xmm xmm xmm
|
|
// VPMAXUD m128 xmm xmm
|
|
// VPMAXUD ymm ymm ymm
|
|
// VPMAXUD m256 ymm ymm
|
|
// Construct and append a VPMAXUD instruction to the active function.
|
|
func (c *Context) VPMAXUD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMAXUD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXUD xmm xmm xmm
|
|
// VPMAXUD m128 xmm xmm
|
|
// VPMAXUD ymm ymm ymm
|
|
// VPMAXUD m256 ymm ymm
|
|
// Construct and append a VPMAXUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMAXUD(mxy, xy, xy1 operand.Op) { ctx.VPMAXUD(mxy, xy, xy1) }
|
|
|
|
// VPMAXUW: Maximum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXUW xmm xmm xmm
|
|
// VPMAXUW m128 xmm xmm
|
|
// VPMAXUW ymm ymm ymm
|
|
// VPMAXUW m256 ymm ymm
|
|
// Construct and append a VPMAXUW instruction to the active function.
|
|
func (c *Context) VPMAXUW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMAXUW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMAXUW: Maximum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMAXUW xmm xmm xmm
|
|
// VPMAXUW m128 xmm xmm
|
|
// VPMAXUW ymm ymm ymm
|
|
// VPMAXUW m256 ymm ymm
|
|
// Construct and append a VPMAXUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMAXUW(mxy, xy, xy1 operand.Op) { ctx.VPMAXUW(mxy, xy, xy1) }
|
|
|
|
// VPMINSB: Minimum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSB xmm xmm xmm
|
|
// VPMINSB m128 xmm xmm
|
|
// VPMINSB ymm ymm ymm
|
|
// VPMINSB m256 ymm ymm
|
|
// Construct and append a VPMINSB instruction to the active function.
|
|
func (c *Context) VPMINSB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMINSB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMINSB: Minimum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSB xmm xmm xmm
|
|
// VPMINSB m128 xmm xmm
|
|
// VPMINSB ymm ymm ymm
|
|
// VPMINSB m256 ymm ymm
|
|
// Construct and append a VPMINSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSB(mxy, xy, xy1 operand.Op) { ctx.VPMINSB(mxy, xy, xy1) }
|
|
|
|
// VPMINSD: Minimum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSD xmm xmm xmm
|
|
// VPMINSD m128 xmm xmm
|
|
// VPMINSD ymm ymm ymm
|
|
// VPMINSD m256 ymm ymm
|
|
// Construct and append a VPMINSD instruction to the active function.
|
|
func (c *Context) VPMINSD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMINSD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMINSD: Minimum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSD xmm xmm xmm
|
|
// VPMINSD m128 xmm xmm
|
|
// VPMINSD ymm ymm ymm
|
|
// VPMINSD m256 ymm ymm
|
|
// Construct and append a VPMINSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSD(mxy, xy, xy1 operand.Op) { ctx.VPMINSD(mxy, xy, xy1) }
|
|
|
|
// VPMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSW xmm xmm xmm
|
|
// VPMINSW m128 xmm xmm
|
|
// VPMINSW ymm ymm ymm
|
|
// VPMINSW m256 ymm ymm
|
|
// Construct and append a VPMINSW instruction to the active function.
|
|
func (c *Context) VPMINSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMINSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSW xmm xmm xmm
|
|
// VPMINSW m128 xmm xmm
|
|
// VPMINSW ymm ymm ymm
|
|
// VPMINSW m256 ymm ymm
|
|
// Construct and append a VPMINSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSW(mxy, xy, xy1 operand.Op) { ctx.VPMINSW(mxy, xy, xy1) }
|
|
|
|
// VPMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUB xmm xmm xmm
|
|
// VPMINUB m128 xmm xmm
|
|
// VPMINUB ymm ymm ymm
|
|
// VPMINUB m256 ymm ymm
|
|
// Construct and append a VPMINUB instruction to the active function.
|
|
func (c *Context) VPMINUB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMINUB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUB xmm xmm xmm
|
|
// VPMINUB m128 xmm xmm
|
|
// VPMINUB ymm ymm ymm
|
|
// VPMINUB m256 ymm ymm
|
|
// Construct and append a VPMINUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUB(mxy, xy, xy1 operand.Op) { ctx.VPMINUB(mxy, xy, xy1) }
|
|
|
|
// VPMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD xmm xmm xmm
|
|
// VPMINUD m128 xmm xmm
|
|
// VPMINUD ymm ymm ymm
|
|
// VPMINUD m256 ymm ymm
|
|
// Construct and append a VPMINUD instruction to the active function.
|
|
func (c *Context) VPMINUD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMINUD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD xmm xmm xmm
|
|
// VPMINUD m128 xmm xmm
|
|
// VPMINUD ymm ymm ymm
|
|
// VPMINUD m256 ymm ymm
|
|
// Construct and append a VPMINUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUD(mxy, xy, xy1 operand.Op) { ctx.VPMINUD(mxy, xy, xy1) }
|
|
|
|
// VPMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUW xmm xmm xmm
|
|
// VPMINUW m128 xmm xmm
|
|
// VPMINUW ymm ymm ymm
|
|
// VPMINUW m256 ymm ymm
|
|
// Construct and append a VPMINUW instruction to the active function.
|
|
func (c *Context) VPMINUW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMINUW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUW xmm xmm xmm
|
|
// VPMINUW m128 xmm xmm
|
|
// VPMINUW ymm ymm ymm
|
|
// VPMINUW m256 ymm ymm
|
|
// Construct and append a VPMINUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUW(mxy, xy, xy1 operand.Op) { ctx.VPMINUW(mxy, xy, xy1) }
|
|
|
|
// VPMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVMSKB xmm r32
|
|
// VPMOVMSKB ymm r32
|
|
// Construct and append a VPMOVMSKB instruction to the active function.
|
|
func (c *Context) VPMOVMSKB(xy, r operand.Op) {
|
|
if inst, err := x86.VPMOVMSKB(xy, r); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVMSKB xmm r32
|
|
// VPMOVMSKB ymm r32
|
|
// Construct and append a VPMOVMSKB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVMSKB(xy, r operand.Op) { ctx.VPMOVMSKB(xy, r) }
|
|
|
|
// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXBD xmm xmm
|
|
// VPMOVSXBD m32 xmm
|
|
// VPMOVSXBD xmm ymm
|
|
// VPMOVSXBD m64 ymm
|
|
// Construct and append a VPMOVSXBD instruction to the active function.
|
|
func (c *Context) VPMOVSXBD(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVSXBD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXBD xmm xmm
|
|
// VPMOVSXBD m32 xmm
|
|
// VPMOVSXBD xmm ymm
|
|
// VPMOVSXBD m64 ymm
|
|
// Construct and append a VPMOVSXBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVSXBD(mx, xy operand.Op) { ctx.VPMOVSXBD(mx, xy) }
|
|
|
|
// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXBQ xmm xmm
|
|
// VPMOVSXBQ m16 xmm
|
|
// VPMOVSXBQ xmm ymm
|
|
// VPMOVSXBQ m32 ymm
|
|
// Construct and append a VPMOVSXBQ instruction to the active function.
|
|
func (c *Context) VPMOVSXBQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVSXBQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXBQ xmm xmm
|
|
// VPMOVSXBQ m16 xmm
|
|
// VPMOVSXBQ xmm ymm
|
|
// VPMOVSXBQ m32 ymm
|
|
// Construct and append a VPMOVSXBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVSXBQ(mx, xy operand.Op) { ctx.VPMOVSXBQ(mx, xy) }
|
|
|
|
// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXBW xmm xmm
|
|
// VPMOVSXBW m64 xmm
|
|
// VPMOVSXBW xmm ymm
|
|
// VPMOVSXBW m128 ymm
|
|
// Construct and append a VPMOVSXBW instruction to the active function.
|
|
func (c *Context) VPMOVSXBW(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVSXBW(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXBW xmm xmm
|
|
// VPMOVSXBW m64 xmm
|
|
// VPMOVSXBW xmm ymm
|
|
// VPMOVSXBW m128 ymm
|
|
// Construct and append a VPMOVSXBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVSXBW(mx, xy operand.Op) { ctx.VPMOVSXBW(mx, xy) }
|
|
|
|
// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXDQ xmm xmm
|
|
// VPMOVSXDQ m64 xmm
|
|
// VPMOVSXDQ xmm ymm
|
|
// VPMOVSXDQ m128 ymm
|
|
// Construct and append a VPMOVSXDQ instruction to the active function.
|
|
func (c *Context) VPMOVSXDQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVSXDQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXDQ xmm xmm
|
|
// VPMOVSXDQ m64 xmm
|
|
// VPMOVSXDQ xmm ymm
|
|
// VPMOVSXDQ m128 ymm
|
|
// Construct and append a VPMOVSXDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVSXDQ(mx, xy operand.Op) { ctx.VPMOVSXDQ(mx, xy) }
|
|
|
|
// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXWD xmm xmm
|
|
// VPMOVSXWD m64 xmm
|
|
// VPMOVSXWD xmm ymm
|
|
// VPMOVSXWD m128 ymm
|
|
// Construct and append a VPMOVSXWD instruction to the active function.
|
|
func (c *Context) VPMOVSXWD(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVSXWD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXWD xmm xmm
|
|
// VPMOVSXWD m64 xmm
|
|
// VPMOVSXWD xmm ymm
|
|
// VPMOVSXWD m128 ymm
|
|
// Construct and append a VPMOVSXWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVSXWD(mx, xy operand.Op) { ctx.VPMOVSXWD(mx, xy) }
|
|
|
|
// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXWQ xmm xmm
|
|
// VPMOVSXWQ m32 xmm
|
|
// VPMOVSXWQ xmm ymm
|
|
// VPMOVSXWQ m64 ymm
|
|
// Construct and append a VPMOVSXWQ instruction to the active function.
|
|
func (c *Context) VPMOVSXWQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVSXWQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVSXWQ xmm xmm
|
|
// VPMOVSXWQ m32 xmm
|
|
// VPMOVSXWQ xmm ymm
|
|
// VPMOVSXWQ m64 ymm
|
|
// Construct and append a VPMOVSXWQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVSXWQ(mx, xy operand.Op) { ctx.VPMOVSXWQ(mx, xy) }
|
|
|
|
// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBD xmm xmm
|
|
// VPMOVZXBD m32 xmm
|
|
// VPMOVZXBD xmm ymm
|
|
// VPMOVZXBD m64 ymm
|
|
// Construct and append a VPMOVZXBD instruction to the active function.
|
|
func (c *Context) VPMOVZXBD(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVZXBD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBD xmm xmm
|
|
// VPMOVZXBD m32 xmm
|
|
// VPMOVZXBD xmm ymm
|
|
// VPMOVZXBD m64 ymm
|
|
// Construct and append a VPMOVZXBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBD(mx, xy operand.Op) { ctx.VPMOVZXBD(mx, xy) }
|
|
|
|
// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBQ xmm xmm
|
|
// VPMOVZXBQ m16 xmm
|
|
// VPMOVZXBQ xmm ymm
|
|
// VPMOVZXBQ m32 ymm
|
|
// Construct and append a VPMOVZXBQ instruction to the active function.
|
|
func (c *Context) VPMOVZXBQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVZXBQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBQ xmm xmm
|
|
// VPMOVZXBQ m16 xmm
|
|
// VPMOVZXBQ xmm ymm
|
|
// VPMOVZXBQ m32 ymm
|
|
// Construct and append a VPMOVZXBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBQ(mx, xy operand.Op) { ctx.VPMOVZXBQ(mx, xy) }
|
|
|
|
// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBW xmm xmm
|
|
// VPMOVZXBW m64 xmm
|
|
// VPMOVZXBW xmm ymm
|
|
// VPMOVZXBW m128 ymm
|
|
// Construct and append a VPMOVZXBW instruction to the active function.
|
|
func (c *Context) VPMOVZXBW(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVZXBW(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBW xmm xmm
|
|
// VPMOVZXBW m64 xmm
|
|
// VPMOVZXBW xmm ymm
|
|
// VPMOVZXBW m128 ymm
|
|
// Construct and append a VPMOVZXBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBW(mx, xy operand.Op) { ctx.VPMOVZXBW(mx, xy) }
|
|
|
|
// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXDQ xmm xmm
|
|
// VPMOVZXDQ m64 xmm
|
|
// VPMOVZXDQ xmm ymm
|
|
// VPMOVZXDQ m128 ymm
|
|
// Construct and append a VPMOVZXDQ instruction to the active function.
|
|
func (c *Context) VPMOVZXDQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVZXDQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXDQ xmm xmm
|
|
// VPMOVZXDQ m64 xmm
|
|
// VPMOVZXDQ xmm ymm
|
|
// VPMOVZXDQ m128 ymm
|
|
// Construct and append a VPMOVZXDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXDQ(mx, xy operand.Op) { ctx.VPMOVZXDQ(mx, xy) }
|
|
|
|
// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWD xmm xmm
|
|
// VPMOVZXWD m64 xmm
|
|
// VPMOVZXWD xmm ymm
|
|
// VPMOVZXWD m128 ymm
|
|
// Construct and append a VPMOVZXWD instruction to the active function.
|
|
func (c *Context) VPMOVZXWD(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVZXWD(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWD xmm xmm
|
|
// VPMOVZXWD m64 xmm
|
|
// VPMOVZXWD xmm ymm
|
|
// VPMOVZXWD m128 ymm
|
|
// Construct and append a VPMOVZXWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXWD(mx, xy operand.Op) { ctx.VPMOVZXWD(mx, xy) }
|
|
|
|
// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWQ xmm xmm
|
|
// VPMOVZXWQ m32 xmm
|
|
// VPMOVZXWQ xmm ymm
|
|
// VPMOVZXWQ m64 ymm
|
|
// Construct and append a VPMOVZXWQ instruction to the active function.
|
|
func (c *Context) VPMOVZXWQ(mx, xy operand.Op) {
|
|
if inst, err := x86.VPMOVZXWQ(mx, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWQ xmm xmm
|
|
// VPMOVZXWQ m32 xmm
|
|
// VPMOVZXWQ xmm ymm
|
|
// VPMOVZXWQ m64 ymm
|
|
// Construct and append a VPMOVZXWQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXWQ(mx, xy operand.Op) { ctx.VPMOVZXWQ(mx, xy) }
|
|
|
|
// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ xmm xmm xmm
|
|
// VPMULDQ m128 xmm xmm
|
|
// VPMULDQ ymm ymm ymm
|
|
// VPMULDQ m256 ymm ymm
|
|
// Construct and append a VPMULDQ instruction to the active function.
|
|
func (c *Context) VPMULDQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULDQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ xmm xmm xmm
|
|
// VPMULDQ m128 xmm xmm
|
|
// VPMULDQ ymm ymm ymm
|
|
// VPMULDQ m256 ymm ymm
|
|
// Construct and append a VPMULDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULDQ(mxy, xy, xy1 operand.Op) { ctx.VPMULDQ(mxy, xy, xy1) }
|
|
|
|
// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHRSW xmm xmm xmm
|
|
// VPMULHRSW m128 xmm xmm
|
|
// VPMULHRSW ymm ymm ymm
|
|
// VPMULHRSW m256 ymm ymm
|
|
// Construct and append a VPMULHRSW instruction to the active function.
|
|
func (c *Context) VPMULHRSW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULHRSW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHRSW xmm xmm xmm
|
|
// VPMULHRSW m128 xmm xmm
|
|
// VPMULHRSW ymm ymm ymm
|
|
// VPMULHRSW m256 ymm ymm
|
|
// Construct and append a VPMULHRSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHRSW(mxy, xy, xy1 operand.Op) { ctx.VPMULHRSW(mxy, xy, xy1) }
|
|
|
|
// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHUW xmm xmm xmm
|
|
// VPMULHUW m128 xmm xmm
|
|
// VPMULHUW ymm ymm ymm
|
|
// VPMULHUW m256 ymm ymm
|
|
// Construct and append a VPMULHUW instruction to the active function.
|
|
func (c *Context) VPMULHUW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULHUW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHUW xmm xmm xmm
|
|
// VPMULHUW m128 xmm xmm
|
|
// VPMULHUW ymm ymm ymm
|
|
// VPMULHUW m256 ymm ymm
|
|
// Construct and append a VPMULHUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHUW(mxy, xy, xy1 operand.Op) { ctx.VPMULHUW(mxy, xy, xy1) }
|
|
|
|
// VPMULHW: Multiply Packed Signed Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHW xmm xmm xmm
|
|
// VPMULHW m128 xmm xmm
|
|
// VPMULHW ymm ymm ymm
|
|
// VPMULHW m256 ymm ymm
|
|
// Construct and append a VPMULHW instruction to the active function.
|
|
func (c *Context) VPMULHW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULHW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULHW: Multiply Packed Signed Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHW xmm xmm xmm
|
|
// VPMULHW m128 xmm xmm
|
|
// VPMULHW ymm ymm ymm
|
|
// VPMULHW m256 ymm ymm
|
|
// Construct and append a VPMULHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHW(mxy, xy, xy1 operand.Op) { ctx.VPMULHW(mxy, xy, xy1) }
|
|
|
|
// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD xmm xmm xmm
|
|
// VPMULLD m128 xmm xmm
|
|
// VPMULLD ymm ymm ymm
|
|
// VPMULLD m256 ymm ymm
|
|
// Construct and append a VPMULLD instruction to the active function.
|
|
func (c *Context) VPMULLD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULLD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD xmm xmm xmm
|
|
// VPMULLD m128 xmm xmm
|
|
// VPMULLD ymm ymm ymm
|
|
// VPMULLD m256 ymm ymm
|
|
// Construct and append a VPMULLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLD(mxy, xy, xy1 operand.Op) { ctx.VPMULLD(mxy, xy, xy1) }
|
|
|
|
// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLW xmm xmm xmm
|
|
// VPMULLW m128 xmm xmm
|
|
// VPMULLW ymm ymm ymm
|
|
// VPMULLW m256 ymm ymm
|
|
// Construct and append a VPMULLW instruction to the active function.
|
|
func (c *Context) VPMULLW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULLW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLW xmm xmm xmm
|
|
// VPMULLW m128 xmm xmm
|
|
// VPMULLW ymm ymm ymm
|
|
// VPMULLW m256 ymm ymm
|
|
// Construct and append a VPMULLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLW(mxy, xy, xy1 operand.Op) { ctx.VPMULLW(mxy, xy, xy1) }
|
|
|
|
// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ xmm xmm xmm
|
|
// VPMULUDQ m128 xmm xmm
|
|
// VPMULUDQ ymm ymm ymm
|
|
// VPMULUDQ m256 ymm ymm
|
|
// Construct and append a VPMULUDQ instruction to the active function.
|
|
func (c *Context) VPMULUDQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPMULUDQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ xmm xmm xmm
|
|
// VPMULUDQ m128 xmm xmm
|
|
// VPMULUDQ ymm ymm ymm
|
|
// VPMULUDQ m256 ymm ymm
|
|
// Construct and append a VPMULUDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULUDQ(mxy, xy, xy1 operand.Op) { ctx.VPMULUDQ(mxy, xy, xy1) }
|
|
|
|
// VPOR: Packed Bitwise Logical OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOR xmm xmm xmm
|
|
// VPOR m128 xmm xmm
|
|
// VPOR ymm ymm ymm
|
|
// VPOR m256 ymm ymm
|
|
// Construct and append a VPOR instruction to the active function.
|
|
func (c *Context) VPOR(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPOR(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPOR: Packed Bitwise Logical OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOR xmm xmm xmm
|
|
// VPOR m128 xmm xmm
|
|
// VPOR ymm ymm ymm
|
|
// VPOR m256 ymm ymm
|
|
// Construct and append a VPOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOR(mxy, xy, xy1 operand.Op) { ctx.VPOR(mxy, xy, xy1) }
|
|
|
|
// VPSADBW: Compute Sum of Absolute Differences.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSADBW xmm xmm xmm
|
|
// VPSADBW m128 xmm xmm
|
|
// VPSADBW ymm ymm ymm
|
|
// VPSADBW m256 ymm ymm
|
|
// Construct and append a VPSADBW instruction to the active function.
|
|
func (c *Context) VPSADBW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSADBW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSADBW: Compute Sum of Absolute Differences.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSADBW xmm xmm xmm
|
|
// VPSADBW m128 xmm xmm
|
|
// VPSADBW ymm ymm ymm
|
|
// VPSADBW m256 ymm ymm
|
|
// Construct and append a VPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSADBW(mxy, xy, xy1 operand.Op) { ctx.VPSADBW(mxy, xy, xy1) }
|
|
|
|
// VPSHUFB: Packed Shuffle Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFB xmm xmm xmm
|
|
// VPSHUFB m128 xmm xmm
|
|
// VPSHUFB ymm ymm ymm
|
|
// VPSHUFB m256 ymm ymm
|
|
// Construct and append a VPSHUFB instruction to the active function.
|
|
func (c *Context) VPSHUFB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSHUFB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSHUFB: Packed Shuffle Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFB xmm xmm xmm
|
|
// VPSHUFB m128 xmm xmm
|
|
// VPSHUFB ymm ymm ymm
|
|
// VPSHUFB m256 ymm ymm
|
|
// Construct and append a VPSHUFB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFB(mxy, xy, xy1 operand.Op) { ctx.VPSHUFB(mxy, xy, xy1) }
|
|
|
|
// VPSHUFD: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD imm8 xmm xmm
|
|
// VPSHUFD imm8 m128 xmm
|
|
// VPSHUFD imm8 ymm ymm
|
|
// VPSHUFD imm8 m256 ymm
|
|
// Construct and append a VPSHUFD instruction to the active function.
|
|
func (c *Context) VPSHUFD(i, mxy, xy operand.Op) {
|
|
if inst, err := x86.VPSHUFD(i, mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSHUFD: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD imm8 xmm xmm
|
|
// VPSHUFD imm8 m128 xmm
|
|
// VPSHUFD imm8 ymm ymm
|
|
// VPSHUFD imm8 m256 ymm
|
|
// Construct and append a VPSHUFD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFD(i, mxy, xy operand.Op) { ctx.VPSHUFD(i, mxy, xy) }
|
|
|
|
// VPSHUFHW: Shuffle Packed High Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFHW imm8 xmm xmm
|
|
// VPSHUFHW imm8 m128 xmm
|
|
// VPSHUFHW imm8 ymm ymm
|
|
// VPSHUFHW imm8 m256 ymm
|
|
// Construct and append a VPSHUFHW instruction to the active function.
|
|
func (c *Context) VPSHUFHW(i, mxy, xy operand.Op) {
|
|
if inst, err := x86.VPSHUFHW(i, mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSHUFHW: Shuffle Packed High Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFHW imm8 xmm xmm
|
|
// VPSHUFHW imm8 m128 xmm
|
|
// VPSHUFHW imm8 ymm ymm
|
|
// VPSHUFHW imm8 m256 ymm
|
|
// Construct and append a VPSHUFHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFHW(i, mxy, xy operand.Op) { ctx.VPSHUFHW(i, mxy, xy) }
|
|
|
|
// VPSHUFLW: Shuffle Packed Low Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFLW imm8 xmm xmm
|
|
// VPSHUFLW imm8 m128 xmm
|
|
// VPSHUFLW imm8 ymm ymm
|
|
// VPSHUFLW imm8 m256 ymm
|
|
// Construct and append a VPSHUFLW instruction to the active function.
|
|
func (c *Context) VPSHUFLW(i, mxy, xy operand.Op) {
|
|
if inst, err := x86.VPSHUFLW(i, mxy, xy); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSHUFLW: Shuffle Packed Low Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFLW imm8 xmm xmm
|
|
// VPSHUFLW imm8 m128 xmm
|
|
// VPSHUFLW imm8 ymm ymm
|
|
// VPSHUFLW imm8 m256 ymm
|
|
// Construct and append a VPSHUFLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFLW(i, mxy, xy operand.Op) { ctx.VPSHUFLW(i, mxy, xy) }
|
|
|
|
// VPSIGNB: Packed Sign of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNB xmm xmm xmm
|
|
// VPSIGNB m128 xmm xmm
|
|
// VPSIGNB ymm ymm ymm
|
|
// VPSIGNB m256 ymm ymm
|
|
// Construct and append a VPSIGNB instruction to the active function.
|
|
func (c *Context) VPSIGNB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSIGNB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSIGNB: Packed Sign of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNB xmm xmm xmm
|
|
// VPSIGNB m128 xmm xmm
|
|
// VPSIGNB ymm ymm ymm
|
|
// VPSIGNB m256 ymm ymm
|
|
// Construct and append a VPSIGNB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSIGNB(mxy, xy, xy1 operand.Op) { ctx.VPSIGNB(mxy, xy, xy1) }
|
|
|
|
// VPSIGND: Packed Sign of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGND xmm xmm xmm
|
|
// VPSIGND m128 xmm xmm
|
|
// VPSIGND ymm ymm ymm
|
|
// VPSIGND m256 ymm ymm
|
|
// Construct and append a VPSIGND instruction to the active function.
|
|
func (c *Context) VPSIGND(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSIGND(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSIGND: Packed Sign of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGND xmm xmm xmm
|
|
// VPSIGND m128 xmm xmm
|
|
// VPSIGND ymm ymm ymm
|
|
// VPSIGND m256 ymm ymm
|
|
// Construct and append a VPSIGND instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSIGND(mxy, xy, xy1 operand.Op) { ctx.VPSIGND(mxy, xy, xy1) }
|
|
|
|
// VPSIGNW: Packed Sign of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNW xmm xmm xmm
|
|
// VPSIGNW m128 xmm xmm
|
|
// VPSIGNW ymm ymm ymm
|
|
// VPSIGNW m256 ymm ymm
|
|
// Construct and append a VPSIGNW instruction to the active function.
|
|
func (c *Context) VPSIGNW(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSIGNW(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSIGNW: Packed Sign of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNW xmm xmm xmm
|
|
// VPSIGNW m128 xmm xmm
|
|
// VPSIGNW ymm ymm ymm
|
|
// VPSIGNW m256 ymm ymm
|
|
// Construct and append a VPSIGNW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSIGNW(mxy, xy, xy1 operand.Op) { ctx.VPSIGNW(mxy, xy, xy1) }
|
|
|
|
// VPSLLD: Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLD imm8 xmm xmm
|
|
// VPSLLD xmm xmm xmm
|
|
// VPSLLD m128 xmm xmm
|
|
// VPSLLD imm8 ymm ymm
|
|
// VPSLLD xmm ymm ymm
|
|
// VPSLLD m128 ymm ymm
|
|
// Construct and append a VPSLLD instruction to the active function.
|
|
func (c *Context) VPSLLD(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSLLD(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSLLD: Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLD imm8 xmm xmm
|
|
// VPSLLD xmm xmm xmm
|
|
// VPSLLD m128 xmm xmm
|
|
// VPSLLD imm8 ymm ymm
|
|
// VPSLLD xmm ymm ymm
|
|
// VPSLLD m128 ymm ymm
|
|
// Construct and append a VPSLLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLD(imx, xy, xy1 operand.Op) { ctx.VPSLLD(imx, xy, xy1) }
|
|
|
|
// VPSLLDQ: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLDQ imm8 xmm xmm
|
|
// VPSLLDQ imm8 ymm ymm
|
|
// Construct and append a VPSLLDQ instruction to the active function.
|
|
func (c *Context) VPSLLDQ(i, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSLLDQ(i, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSLLDQ: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLDQ imm8 xmm xmm
|
|
// VPSLLDQ imm8 ymm ymm
|
|
// Construct and append a VPSLLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLDQ(i, xy, xy1 operand.Op) { ctx.VPSLLDQ(i, xy, xy1) }
|
|
|
|
// VPSLLQ: Shift Packed Quadword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLQ imm8 xmm xmm
|
|
// VPSLLQ xmm xmm xmm
|
|
// VPSLLQ m128 xmm xmm
|
|
// VPSLLQ imm8 ymm ymm
|
|
// VPSLLQ xmm ymm ymm
|
|
// VPSLLQ m128 ymm ymm
|
|
// Construct and append a VPSLLQ instruction to the active function.
|
|
func (c *Context) VPSLLQ(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSLLQ(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSLLQ: Shift Packed Quadword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLQ imm8 xmm xmm
|
|
// VPSLLQ xmm xmm xmm
|
|
// VPSLLQ m128 xmm xmm
|
|
// VPSLLQ imm8 ymm ymm
|
|
// VPSLLQ xmm ymm ymm
|
|
// VPSLLQ m128 ymm ymm
|
|
// Construct and append a VPSLLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLQ(imx, xy, xy1 operand.Op) { ctx.VPSLLQ(imx, xy, xy1) }
|
|
|
|
// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLVD xmm xmm xmm
|
|
// VPSLLVD m128 xmm xmm
|
|
// VPSLLVD ymm ymm ymm
|
|
// VPSLLVD m256 ymm ymm
|
|
// Construct and append a VPSLLVD instruction to the active function.
|
|
func (c *Context) VPSLLVD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSLLVD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLVD xmm xmm xmm
|
|
// VPSLLVD m128 xmm xmm
|
|
// VPSLLVD ymm ymm ymm
|
|
// VPSLLVD m256 ymm ymm
|
|
// Construct and append a VPSLLVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLVD(mxy, xy, xy1 operand.Op) { ctx.VPSLLVD(mxy, xy, xy1) }
|
|
|
|
// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLVQ xmm xmm xmm
|
|
// VPSLLVQ m128 xmm xmm
|
|
// VPSLLVQ ymm ymm ymm
|
|
// VPSLLVQ m256 ymm ymm
|
|
// Construct and append a VPSLLVQ instruction to the active function.
|
|
func (c *Context) VPSLLVQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSLLVQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLVQ xmm xmm xmm
|
|
// VPSLLVQ m128 xmm xmm
|
|
// VPSLLVQ ymm ymm ymm
|
|
// VPSLLVQ m256 ymm ymm
|
|
// Construct and append a VPSLLVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLVQ(mxy, xy, xy1 operand.Op) { ctx.VPSLLVQ(mxy, xy, xy1) }
|
|
|
|
// VPSLLW: Shift Packed Word Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLW imm8 xmm xmm
|
|
// VPSLLW xmm xmm xmm
|
|
// VPSLLW m128 xmm xmm
|
|
// VPSLLW imm8 ymm ymm
|
|
// VPSLLW xmm ymm ymm
|
|
// VPSLLW m128 ymm ymm
|
|
// Construct and append a VPSLLW instruction to the active function.
|
|
func (c *Context) VPSLLW(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSLLW(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSLLW: Shift Packed Word Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLW imm8 xmm xmm
|
|
// VPSLLW xmm xmm xmm
|
|
// VPSLLW m128 xmm xmm
|
|
// VPSLLW imm8 ymm ymm
|
|
// VPSLLW xmm ymm ymm
|
|
// VPSLLW m128 ymm ymm
|
|
// Construct and append a VPSLLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLW(imx, xy, xy1 operand.Op) { ctx.VPSLLW(imx, xy, xy1) }
|
|
|
|
// VPSRAD: Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAD imm8 xmm xmm
|
|
// VPSRAD xmm xmm xmm
|
|
// VPSRAD m128 xmm xmm
|
|
// VPSRAD imm8 ymm ymm
|
|
// VPSRAD xmm ymm ymm
|
|
// VPSRAD m128 ymm ymm
|
|
// Construct and append a VPSRAD instruction to the active function.
|
|
func (c *Context) VPSRAD(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRAD(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRAD: Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAD imm8 xmm xmm
|
|
// VPSRAD xmm xmm xmm
|
|
// VPSRAD m128 xmm xmm
|
|
// VPSRAD imm8 ymm ymm
|
|
// VPSRAD xmm ymm ymm
|
|
// VPSRAD m128 ymm ymm
|
|
// Construct and append a VPSRAD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAD(imx, xy, xy1 operand.Op) { ctx.VPSRAD(imx, xy, xy1) }
|
|
|
|
// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD xmm xmm xmm
|
|
// VPSRAVD m128 xmm xmm
|
|
// VPSRAVD ymm ymm ymm
|
|
// VPSRAVD m256 ymm ymm
|
|
// Construct and append a VPSRAVD instruction to the active function.
|
|
func (c *Context) VPSRAVD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRAVD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD xmm xmm xmm
|
|
// VPSRAVD m128 xmm xmm
|
|
// VPSRAVD ymm ymm ymm
|
|
// VPSRAVD m256 ymm ymm
|
|
// Construct and append a VPSRAVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVD(mxy, xy, xy1 operand.Op) { ctx.VPSRAVD(mxy, xy, xy1) }
|
|
|
|
// VPSRAW: Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAW imm8 xmm xmm
|
|
// VPSRAW xmm xmm xmm
|
|
// VPSRAW m128 xmm xmm
|
|
// VPSRAW imm8 ymm ymm
|
|
// VPSRAW xmm ymm ymm
|
|
// VPSRAW m128 ymm ymm
|
|
// Construct and append a VPSRAW instruction to the active function.
|
|
func (c *Context) VPSRAW(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRAW(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRAW: Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAW imm8 xmm xmm
|
|
// VPSRAW xmm xmm xmm
|
|
// VPSRAW m128 xmm xmm
|
|
// VPSRAW imm8 ymm ymm
|
|
// VPSRAW xmm ymm ymm
|
|
// VPSRAW m128 ymm ymm
|
|
// Construct and append a VPSRAW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAW(imx, xy, xy1 operand.Op) { ctx.VPSRAW(imx, xy, xy1) }
|
|
|
|
// VPSRLD: Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD imm8 xmm xmm
|
|
// VPSRLD xmm xmm xmm
|
|
// VPSRLD m128 xmm xmm
|
|
// VPSRLD imm8 ymm ymm
|
|
// VPSRLD xmm ymm ymm
|
|
// VPSRLD m128 ymm ymm
|
|
// Construct and append a VPSRLD instruction to the active function.
|
|
func (c *Context) VPSRLD(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRLD(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRLD: Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD imm8 xmm xmm
|
|
// VPSRLD xmm xmm xmm
|
|
// VPSRLD m128 xmm xmm
|
|
// VPSRLD imm8 ymm ymm
|
|
// VPSRLD xmm ymm ymm
|
|
// VPSRLD m128 ymm ymm
|
|
// Construct and append a VPSRLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLD(imx, xy, xy1 operand.Op) { ctx.VPSRLD(imx, xy, xy1) }
|
|
|
|
// VPSRLDQ: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLDQ imm8 xmm xmm
|
|
// VPSRLDQ imm8 ymm ymm
|
|
// Construct and append a VPSRLDQ instruction to the active function.
|
|
func (c *Context) VPSRLDQ(i, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRLDQ(i, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRLDQ: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLDQ imm8 xmm xmm
|
|
// VPSRLDQ imm8 ymm ymm
|
|
// Construct and append a VPSRLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLDQ(i, xy, xy1 operand.Op) { ctx.VPSRLDQ(i, xy, xy1) }
|
|
|
|
// VPSRLQ: Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ imm8 xmm xmm
|
|
// VPSRLQ xmm xmm xmm
|
|
// VPSRLQ m128 xmm xmm
|
|
// VPSRLQ imm8 ymm ymm
|
|
// VPSRLQ xmm ymm ymm
|
|
// VPSRLQ m128 ymm ymm
|
|
// Construct and append a VPSRLQ instruction to the active function.
|
|
func (c *Context) VPSRLQ(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRLQ(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRLQ: Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ imm8 xmm xmm
|
|
// VPSRLQ xmm xmm xmm
|
|
// VPSRLQ m128 xmm xmm
|
|
// VPSRLQ imm8 ymm ymm
|
|
// VPSRLQ xmm ymm ymm
|
|
// VPSRLQ m128 ymm ymm
|
|
// Construct and append a VPSRLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLQ(imx, xy, xy1 operand.Op) { ctx.VPSRLQ(imx, xy, xy1) }
|
|
|
|
// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD xmm xmm xmm
|
|
// VPSRLVD m128 xmm xmm
|
|
// VPSRLVD ymm ymm ymm
|
|
// VPSRLVD m256 ymm ymm
|
|
// Construct and append a VPSRLVD instruction to the active function.
|
|
func (c *Context) VPSRLVD(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRLVD(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD xmm xmm xmm
|
|
// VPSRLVD m128 xmm xmm
|
|
// VPSRLVD ymm ymm ymm
|
|
// VPSRLVD m256 ymm ymm
|
|
// Construct and append a VPSRLVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVD(mxy, xy, xy1 operand.Op) { ctx.VPSRLVD(mxy, xy, xy1) }
|
|
|
|
// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ xmm xmm xmm
|
|
// VPSRLVQ m128 xmm xmm
|
|
// VPSRLVQ ymm ymm ymm
|
|
// VPSRLVQ m256 ymm ymm
|
|
// Construct and append a VPSRLVQ instruction to the active function.
|
|
func (c *Context) VPSRLVQ(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRLVQ(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ xmm xmm xmm
|
|
// VPSRLVQ m128 xmm xmm
|
|
// VPSRLVQ ymm ymm ymm
|
|
// VPSRLVQ m256 ymm ymm
|
|
// Construct and append a VPSRLVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVQ(mxy, xy, xy1 operand.Op) { ctx.VPSRLVQ(mxy, xy, xy1) }
|
|
|
|
// VPSRLW: Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLW imm8 xmm xmm
|
|
// VPSRLW xmm xmm xmm
|
|
// VPSRLW m128 xmm xmm
|
|
// VPSRLW imm8 ymm ymm
|
|
// VPSRLW xmm ymm ymm
|
|
// VPSRLW m128 ymm ymm
|
|
// Construct and append a VPSRLW instruction to the active function.
|
|
func (c *Context) VPSRLW(imx, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSRLW(imx, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSRLW: Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLW imm8 xmm xmm
|
|
// VPSRLW xmm xmm xmm
|
|
// VPSRLW m128 xmm xmm
|
|
// VPSRLW imm8 ymm ymm
|
|
// VPSRLW xmm ymm ymm
|
|
// VPSRLW m128 ymm ymm
|
|
// Construct and append a VPSRLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLW(imx, xy, xy1 operand.Op) { ctx.VPSRLW(imx, xy, xy1) }
|
|
|
|
// VPSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBB xmm xmm xmm
|
|
// VPSUBB m128 xmm xmm
|
|
// VPSUBB ymm ymm ymm
|
|
// VPSUBB m256 ymm ymm
|
|
// Construct and append a VPSUBB instruction to the active function.
|
|
func (c *Context) VPSUBB(mxy, xy, xy1 operand.Op) {
|
|
if inst, err := x86.VPSUBB(mxy, xy, xy1); err == nil {
|
|
c.Instruction(inst)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// VPSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBB xmm xmm xmm
|
|
// VPSUBB m128 xmm xmm
|
|
// VPSUBB ymm ymm ymm
|
|
// VPSUBB m256 ymm ymm
|
|
// Construct and append a VPSUBB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBB(mxy, xy, xy1 operand.Op) { ctx.VPSUBB(mxy, xy, xy1) }
|
|
|
|
// VPSUBD: Subtract Packed Doubleword Integers.
//
// Forms:
//
// VPSUBD xmm xmm xmm
// VPSUBD m128 xmm xmm
// VPSUBD ymm ymm ymm
// VPSUBD m256 ymm ymm
// Construct and append a VPSUBD instruction to the active function.
func (c *Context) VPSUBD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBD: Subtract Packed Doubleword Integers.
//
// Forms:
//
// VPSUBD xmm xmm xmm
// VPSUBD m128 xmm xmm
// VPSUBD ymm ymm ymm
// VPSUBD m256 ymm ymm
// Construct and append a VPSUBD instruction to the active function.
// Operates on the global context.
func VPSUBD(mxy, xy, xy1 operand.Op) { ctx.VPSUBD(mxy, xy, xy1) }

// VPSUBQ: Subtract Packed Quadword Integers.
//
// Forms:
//
// VPSUBQ xmm xmm xmm
// VPSUBQ m128 xmm xmm
// VPSUBQ ymm ymm ymm
// VPSUBQ m256 ymm ymm
// Construct and append a VPSUBQ instruction to the active function.
func (c *Context) VPSUBQ(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBQ(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBQ: Subtract Packed Quadword Integers.
//
// Forms:
//
// VPSUBQ xmm xmm xmm
// VPSUBQ m128 xmm xmm
// VPSUBQ ymm ymm ymm
// VPSUBQ m256 ymm ymm
// Construct and append a VPSUBQ instruction to the active function.
// Operates on the global context.
func VPSUBQ(mxy, xy, xy1 operand.Op) { ctx.VPSUBQ(mxy, xy, xy1) }

// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
//
// Forms:
//
// VPSUBSB xmm xmm xmm
// VPSUBSB m128 xmm xmm
// VPSUBSB ymm ymm ymm
// VPSUBSB m256 ymm ymm
// Construct and append a VPSUBSB instruction to the active function.
func (c *Context) VPSUBSB(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBSB(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
//
// Forms:
//
// VPSUBSB xmm xmm xmm
// VPSUBSB m128 xmm xmm
// VPSUBSB ymm ymm ymm
// VPSUBSB m256 ymm ymm
// Construct and append a VPSUBSB instruction to the active function.
// Operates on the global context.
func VPSUBSB(mxy, xy, xy1 operand.Op) { ctx.VPSUBSB(mxy, xy, xy1) }

// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
//
// Forms:
//
// VPSUBSW xmm xmm xmm
// VPSUBSW m128 xmm xmm
// VPSUBSW ymm ymm ymm
// VPSUBSW m256 ymm ymm
// Construct and append a VPSUBSW instruction to the active function.
func (c *Context) VPSUBSW(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBSW(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
//
// Forms:
//
// VPSUBSW xmm xmm xmm
// VPSUBSW m128 xmm xmm
// VPSUBSW ymm ymm ymm
// VPSUBSW m256 ymm ymm
// Construct and append a VPSUBSW instruction to the active function.
// Operates on the global context.
func VPSUBSW(mxy, xy, xy1 operand.Op) { ctx.VPSUBSW(mxy, xy, xy1) }

// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
//
// Forms:
//
// VPSUBUSB xmm xmm xmm
// VPSUBUSB m128 xmm xmm
// VPSUBUSB ymm ymm ymm
// VPSUBUSB m256 ymm ymm
// Construct and append a VPSUBUSB instruction to the active function.
func (c *Context) VPSUBUSB(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBUSB(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
//
// Forms:
//
// VPSUBUSB xmm xmm xmm
// VPSUBUSB m128 xmm xmm
// VPSUBUSB ymm ymm ymm
// VPSUBUSB m256 ymm ymm
// Construct and append a VPSUBUSB instruction to the active function.
// Operates on the global context.
func VPSUBUSB(mxy, xy, xy1 operand.Op) { ctx.VPSUBUSB(mxy, xy, xy1) }

// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
//
// Forms:
//
// VPSUBUSW xmm xmm xmm
// VPSUBUSW m128 xmm xmm
// VPSUBUSW ymm ymm ymm
// VPSUBUSW m256 ymm ymm
// Construct and append a VPSUBUSW instruction to the active function.
func (c *Context) VPSUBUSW(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBUSW(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
//
// Forms:
//
// VPSUBUSW xmm xmm xmm
// VPSUBUSW m128 xmm xmm
// VPSUBUSW ymm ymm ymm
// VPSUBUSW m256 ymm ymm
// Construct and append a VPSUBUSW instruction to the active function.
// Operates on the global context.
func VPSUBUSW(mxy, xy, xy1 operand.Op) { ctx.VPSUBUSW(mxy, xy, xy1) }

// VPSUBW: Subtract Packed Word Integers.
//
// Forms:
//
// VPSUBW xmm xmm xmm
// VPSUBW m128 xmm xmm
// VPSUBW ymm ymm ymm
// VPSUBW m256 ymm ymm
// Construct and append a VPSUBW instruction to the active function.
func (c *Context) VPSUBW(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPSUBW(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPSUBW: Subtract Packed Word Integers.
//
// Forms:
//
// VPSUBW xmm xmm xmm
// VPSUBW m128 xmm xmm
// VPSUBW ymm ymm ymm
// VPSUBW m256 ymm ymm
// Construct and append a VPSUBW instruction to the active function.
// Operates on the global context.
func VPSUBW(mxy, xy, xy1 operand.Op) { ctx.VPSUBW(mxy, xy, xy1) }

// VPTEST: Packed Logical Compare.
//
// Forms:
//
// VPTEST xmm xmm
// VPTEST m128 xmm
// VPTEST ymm ymm
// VPTEST m256 ymm
// Construct and append a VPTEST instruction to the active function.
func (c *Context) VPTEST(mxy, xy operand.Op) {
	if inst, err := x86.VPTEST(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPTEST: Packed Logical Compare.
//
// Forms:
//
// VPTEST xmm xmm
// VPTEST m128 xmm
// VPTEST ymm ymm
// VPTEST m256 ymm
// Construct and append a VPTEST instruction to the active function.
// Operates on the global context.
func VPTEST(mxy, xy operand.Op) { ctx.VPTEST(mxy, xy) }
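
// Editorial sketch, not generated documentation: VPTEST only sets flags (ZF when
// the bitwise AND of the two operands is zero, CF when the AND-NOT is zero), so a
// generator typically pairs the builder with a conditional branch. Names below are
// illustrative; y stands for an assumed ymm virtual register.
//
//	VPTEST(y, y)                // ZF=1 iff y is all zeroes
//	JZ(operand.LabelRef("all_zero"))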

// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
//
// Forms:
//
// VPUNPCKHBW xmm xmm xmm
// VPUNPCKHBW m128 xmm xmm
// VPUNPCKHBW ymm ymm ymm
// VPUNPCKHBW m256 ymm ymm
// Construct and append a VPUNPCKHBW instruction to the active function.
func (c *Context) VPUNPCKHBW(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKHBW(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
//
// Forms:
//
// VPUNPCKHBW xmm xmm xmm
// VPUNPCKHBW m128 xmm xmm
// VPUNPCKHBW ymm ymm ymm
// VPUNPCKHBW m256 ymm ymm
// Construct and append a VPUNPCKHBW instruction to the active function.
// Operates on the global context.
func VPUNPCKHBW(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHBW(mxy, xy, xy1) }

// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords.
//
// Forms:
//
// VPUNPCKHDQ xmm xmm xmm
// VPUNPCKHDQ m128 xmm xmm
// VPUNPCKHDQ ymm ymm ymm
// VPUNPCKHDQ m256 ymm ymm
// Construct and append a VPUNPCKHDQ instruction to the active function.
func (c *Context) VPUNPCKHDQ(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKHDQ(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords.
//
// Forms:
//
// VPUNPCKHDQ xmm xmm xmm
// VPUNPCKHDQ m128 xmm xmm
// VPUNPCKHDQ ymm ymm ymm
// VPUNPCKHDQ m256 ymm ymm
// Construct and append a VPUNPCKHDQ instruction to the active function.
// Operates on the global context.
func VPUNPCKHDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHDQ(mxy, xy, xy1) }

// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
//
// Forms:
//
// VPUNPCKHQDQ xmm xmm xmm
// VPUNPCKHQDQ m128 xmm xmm
// VPUNPCKHQDQ ymm ymm ymm
// VPUNPCKHQDQ m256 ymm ymm
// Construct and append a VPUNPCKHQDQ instruction to the active function.
func (c *Context) VPUNPCKHQDQ(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKHQDQ(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
//
// Forms:
//
// VPUNPCKHQDQ xmm xmm xmm
// VPUNPCKHQDQ m128 xmm xmm
// VPUNPCKHQDQ ymm ymm ymm
// VPUNPCKHQDQ m256 ymm ymm
// Construct and append a VPUNPCKHQDQ instruction to the active function.
// Operates on the global context.
func VPUNPCKHQDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHQDQ(mxy, xy, xy1) }

// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords.
//
// Forms:
//
// VPUNPCKHWD xmm xmm xmm
// VPUNPCKHWD m128 xmm xmm
// VPUNPCKHWD ymm ymm ymm
// VPUNPCKHWD m256 ymm ymm
// Construct and append a VPUNPCKHWD instruction to the active function.
func (c *Context) VPUNPCKHWD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKHWD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords.
//
// Forms:
//
// VPUNPCKHWD xmm xmm xmm
// VPUNPCKHWD m128 xmm xmm
// VPUNPCKHWD ymm ymm ymm
// VPUNPCKHWD m256 ymm ymm
// Construct and append a VPUNPCKHWD instruction to the active function.
// Operates on the global context.
func VPUNPCKHWD(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKHWD(mxy, xy, xy1) }

// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
//
// Forms:
//
// VPUNPCKLBW xmm xmm xmm
// VPUNPCKLBW m128 xmm xmm
// VPUNPCKLBW ymm ymm ymm
// VPUNPCKLBW m256 ymm ymm
// Construct and append a VPUNPCKLBW instruction to the active function.
func (c *Context) VPUNPCKLBW(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKLBW(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
//
// Forms:
//
// VPUNPCKLBW xmm xmm xmm
// VPUNPCKLBW m128 xmm xmm
// VPUNPCKLBW ymm ymm ymm
// VPUNPCKLBW m256 ymm ymm
// Construct and append a VPUNPCKLBW instruction to the active function.
// Operates on the global context.
func VPUNPCKLBW(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLBW(mxy, xy, xy1) }

// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
//
// Forms:
//
// VPUNPCKLDQ xmm xmm xmm
// VPUNPCKLDQ m128 xmm xmm
// VPUNPCKLDQ ymm ymm ymm
// VPUNPCKLDQ m256 ymm ymm
// Construct and append a VPUNPCKLDQ instruction to the active function.
func (c *Context) VPUNPCKLDQ(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKLDQ(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
//
// Forms:
//
// VPUNPCKLDQ xmm xmm xmm
// VPUNPCKLDQ m128 xmm xmm
// VPUNPCKLDQ ymm ymm ymm
// VPUNPCKLDQ m256 ymm ymm
// Construct and append a VPUNPCKLDQ instruction to the active function.
// Operates on the global context.
func VPUNPCKLDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLDQ(mxy, xy, xy1) }

// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
//
// Forms:
//
// VPUNPCKLQDQ xmm xmm xmm
// VPUNPCKLQDQ m128 xmm xmm
// VPUNPCKLQDQ ymm ymm ymm
// VPUNPCKLQDQ m256 ymm ymm
// Construct and append a VPUNPCKLQDQ instruction to the active function.
func (c *Context) VPUNPCKLQDQ(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKLQDQ(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
//
// Forms:
//
// VPUNPCKLQDQ xmm xmm xmm
// VPUNPCKLQDQ m128 xmm xmm
// VPUNPCKLQDQ ymm ymm ymm
// VPUNPCKLQDQ m256 ymm ymm
// Construct and append a VPUNPCKLQDQ instruction to the active function.
// Operates on the global context.
func VPUNPCKLQDQ(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLQDQ(mxy, xy, xy1) }

// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords.
//
// Forms:
//
// VPUNPCKLWD xmm xmm xmm
// VPUNPCKLWD m128 xmm xmm
// VPUNPCKLWD ymm ymm ymm
// VPUNPCKLWD m256 ymm ymm
// Construct and append a VPUNPCKLWD instruction to the active function.
func (c *Context) VPUNPCKLWD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPUNPCKLWD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords.
//
// Forms:
//
// VPUNPCKLWD xmm xmm xmm
// VPUNPCKLWD m128 xmm xmm
// VPUNPCKLWD ymm ymm ymm
// VPUNPCKLWD m256 ymm ymm
// Construct and append a VPUNPCKLWD instruction to the active function.
// Operates on the global context.
func VPUNPCKLWD(mxy, xy, xy1 operand.Op) { ctx.VPUNPCKLWD(mxy, xy, xy1) }

// VPXOR: Packed Bitwise Logical Exclusive OR.
//
// Forms:
//
// VPXOR xmm xmm xmm
// VPXOR m128 xmm xmm
// VPXOR ymm ymm ymm
// VPXOR m256 ymm ymm
// Construct and append a VPXOR instruction to the active function.
func (c *Context) VPXOR(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VPXOR(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VPXOR: Packed Bitwise Logical Exclusive OR.
//
// Forms:
//
// VPXOR xmm xmm xmm
// VPXOR m128 xmm xmm
// VPXOR ymm ymm ymm
// VPXOR m256 ymm ymm
// Construct and append a VPXOR instruction to the active function.
// Operates on the global context.
func VPXOR(mxy, xy, xy1 operand.Op) { ctx.VPXOR(mxy, xy, xy1) }
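
// Editorial sketch (assumptions noted inline): a common use of the global-context
// form is zeroing a vector register at the top of a generated function, e.g.
//
//	x := XMM()        // XMM is assumed to be avo's xmm virtual-register allocator in this package
//	VPXOR(x, x, x)    // x ^= x, i.e. x = 0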

// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VRCPPS xmm xmm
// VRCPPS m128 xmm
// VRCPPS ymm ymm
// VRCPPS m256 ymm
// Construct and append a VRCPPS instruction to the active function.
func (c *Context) VRCPPS(mxy, xy operand.Op) {
	if inst, err := x86.VRCPPS(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VRCPPS xmm xmm
// VRCPPS m128 xmm
// VRCPPS ymm ymm
// VRCPPS m256 ymm
// Construct and append a VRCPPS instruction to the active function.
// Operates on the global context.
func VRCPPS(mxy, xy operand.Op) { ctx.VRCPPS(mxy, xy) }

// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VRCPSS xmm xmm xmm
// VRCPSS m32 xmm xmm
// Construct and append a VRCPSS instruction to the active function.
func (c *Context) VRCPSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VRCPSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VRCPSS xmm xmm xmm
// VRCPSS m32 xmm xmm
// Construct and append a VRCPSS instruction to the active function.
// Operates on the global context.
func VRCPSS(mx, x, x1 operand.Op) { ctx.VRCPSS(mx, x, x1) }

// VROUNDPD: Round Packed Double Precision Floating-Point Values.
//
// Forms:
//
// VROUNDPD imm8 xmm xmm
// VROUNDPD imm8 m128 xmm
// VROUNDPD imm8 ymm ymm
// VROUNDPD imm8 m256 ymm
// Construct and append a VROUNDPD instruction to the active function.
func (c *Context) VROUNDPD(i, mxy, xy operand.Op) {
	if inst, err := x86.VROUNDPD(i, mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VROUNDPD: Round Packed Double Precision Floating-Point Values.
//
// Forms:
//
// VROUNDPD imm8 xmm xmm
// VROUNDPD imm8 m128 xmm
// VROUNDPD imm8 ymm ymm
// VROUNDPD imm8 m256 ymm
// Construct and append a VROUNDPD instruction to the active function.
// Operates on the global context.
func VROUNDPD(i, mxy, xy operand.Op) { ctx.VROUNDPD(i, mxy, xy) }

// VROUNDPS: Round Packed Single Precision Floating-Point Values.
//
// Forms:
//
// VROUNDPS imm8 xmm xmm
// VROUNDPS imm8 m128 xmm
// VROUNDPS imm8 ymm ymm
// VROUNDPS imm8 m256 ymm
// Construct and append a VROUNDPS instruction to the active function.
func (c *Context) VROUNDPS(i, mxy, xy operand.Op) {
	if inst, err := x86.VROUNDPS(i, mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VROUNDPS: Round Packed Single Precision Floating-Point Values.
//
// Forms:
//
// VROUNDPS imm8 xmm xmm
// VROUNDPS imm8 m128 xmm
// VROUNDPS imm8 ymm ymm
// VROUNDPS imm8 m256 ymm
// Construct and append a VROUNDPS instruction to the active function.
// Operates on the global context.
func VROUNDPS(i, mxy, xy operand.Op) { ctx.VROUNDPS(i, mxy, xy) }

// VROUNDSD: Round Scalar Double Precision Floating-Point Values.
//
// Forms:
//
// VROUNDSD imm8 xmm xmm xmm
// VROUNDSD imm8 m64 xmm xmm
// Construct and append a VROUNDSD instruction to the active function.
func (c *Context) VROUNDSD(i, mx, x, x1 operand.Op) {
	if inst, err := x86.VROUNDSD(i, mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VROUNDSD: Round Scalar Double Precision Floating-Point Values.
//
// Forms:
//
// VROUNDSD imm8 xmm xmm xmm
// VROUNDSD imm8 m64 xmm xmm
// Construct and append a VROUNDSD instruction to the active function.
// Operates on the global context.
func VROUNDSD(i, mx, x, x1 operand.Op) { ctx.VROUNDSD(i, mx, x, x1) }

// VROUNDSS: Round Scalar Single Precision Floating-Point Values.
//
// Forms:
//
// VROUNDSS imm8 xmm xmm xmm
// VROUNDSS imm8 m32 xmm xmm
// Construct and append a VROUNDSS instruction to the active function.
func (c *Context) VROUNDSS(i, mx, x, x1 operand.Op) {
	if inst, err := x86.VROUNDSS(i, mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VROUNDSS: Round Scalar Single Precision Floating-Point Values.
//
// Forms:
//
// VROUNDSS imm8 xmm xmm xmm
// VROUNDSS imm8 m32 xmm xmm
// Construct and append a VROUNDSS instruction to the active function.
// Operates on the global context.
func VROUNDSS(i, mx, x, x1 operand.Op) { ctx.VROUNDSS(i, mx, x, x1) }

// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VRSQRTPS xmm xmm
// VRSQRTPS m128 xmm
// VRSQRTPS ymm ymm
// VRSQRTPS m256 ymm
// Construct and append a VRSQRTPS instruction to the active function.
func (c *Context) VRSQRTPS(mxy, xy operand.Op) {
	if inst, err := x86.VRSQRTPS(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VRSQRTPS xmm xmm
// VRSQRTPS m128 xmm
// VRSQRTPS ymm ymm
// VRSQRTPS m256 ymm
// Construct and append a VRSQRTPS instruction to the active function.
// Operates on the global context.
func VRSQRTPS(mxy, xy operand.Op) { ctx.VRSQRTPS(mxy, xy) }

// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VRSQRTSS xmm xmm xmm
// VRSQRTSS m32 xmm xmm
// Construct and append a VRSQRTSS instruction to the active function.
func (c *Context) VRSQRTSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VRSQRTSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VRSQRTSS xmm xmm xmm
// VRSQRTSS m32 xmm xmm
// Construct and append a VRSQRTSS instruction to the active function.
// Operates on the global context.
func VRSQRTSS(mx, x, x1 operand.Op) { ctx.VRSQRTSS(mx, x, x1) }

// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VSHUFPD imm8 xmm xmm xmm
// VSHUFPD imm8 m128 xmm xmm
// VSHUFPD imm8 ymm ymm ymm
// VSHUFPD imm8 m256 ymm ymm
// Construct and append a VSHUFPD instruction to the active function.
func (c *Context) VSHUFPD(i, mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VSHUFPD(i, mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VSHUFPD imm8 xmm xmm xmm
// VSHUFPD imm8 m128 xmm xmm
// VSHUFPD imm8 ymm ymm ymm
// VSHUFPD imm8 m256 ymm ymm
// Construct and append a VSHUFPD instruction to the active function.
// Operates on the global context.
func VSHUFPD(i, mxy, xy, xy1 operand.Op) { ctx.VSHUFPD(i, mxy, xy, xy1) }

// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VSHUFPS imm8 xmm xmm xmm
// VSHUFPS imm8 m128 xmm xmm
// VSHUFPS imm8 ymm ymm ymm
// VSHUFPS imm8 m256 ymm ymm
// Construct and append a VSHUFPS instruction to the active function.
func (c *Context) VSHUFPS(i, mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VSHUFPS(i, mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VSHUFPS imm8 xmm xmm xmm
// VSHUFPS imm8 m128 xmm xmm
// VSHUFPS imm8 ymm ymm ymm
// VSHUFPS imm8 m256 ymm ymm
// Construct and append a VSHUFPS instruction to the active function.
// Operates on the global context.
func VSHUFPS(i, mxy, xy, xy1 operand.Op) { ctx.VSHUFPS(i, mxy, xy, xy1) }

// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VSQRTPD xmm xmm
// VSQRTPD m128 xmm
// VSQRTPD ymm ymm
// VSQRTPD m256 ymm
// Construct and append a VSQRTPD instruction to the active function.
func (c *Context) VSQRTPD(mxy, xy operand.Op) {
	if inst, err := x86.VSQRTPD(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VSQRTPD xmm xmm
// VSQRTPD m128 xmm
// VSQRTPD ymm ymm
// VSQRTPD m256 ymm
// Construct and append a VSQRTPD instruction to the active function.
// Operates on the global context.
func VSQRTPD(mxy, xy operand.Op) { ctx.VSQRTPD(mxy, xy) }

// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VSQRTPS xmm xmm
// VSQRTPS m128 xmm
// VSQRTPS ymm ymm
// VSQRTPS m256 ymm
// Construct and append a VSQRTPS instruction to the active function.
func (c *Context) VSQRTPS(mxy, xy operand.Op) {
	if inst, err := x86.VSQRTPS(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VSQRTPS xmm xmm
// VSQRTPS m128 xmm
// VSQRTPS ymm ymm
// VSQRTPS m256 ymm
// Construct and append a VSQRTPS instruction to the active function.
// Operates on the global context.
func VSQRTPS(mxy, xy operand.Op) { ctx.VSQRTPS(mxy, xy) }

// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VSQRTSD xmm xmm xmm
// VSQRTSD m64 xmm xmm
// Construct and append a VSQRTSD instruction to the active function.
func (c *Context) VSQRTSD(mx, x, x1 operand.Op) {
	if inst, err := x86.VSQRTSD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
// VSQRTSD xmm xmm xmm
// VSQRTSD m64 xmm xmm
// Construct and append a VSQRTSD instruction to the active function.
// Operates on the global context.
func VSQRTSD(mx, x, x1 operand.Op) { ctx.VSQRTSD(mx, x, x1) }

// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VSQRTSS xmm xmm xmm
// VSQRTSS m32 xmm xmm
// Construct and append a VSQRTSS instruction to the active function.
func (c *Context) VSQRTSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VSQRTSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
// VSQRTSS xmm xmm xmm
// VSQRTSS m32 xmm xmm
// Construct and append a VSQRTSS instruction to the active function.
// Operates on the global context.
func VSQRTSS(mx, x, x1 operand.Op) { ctx.VSQRTSS(mx, x, x1) }

// VSTMXCSR: Store MXCSR Register State.
//
// Forms:
//
// VSTMXCSR m32
// Construct and append a VSTMXCSR instruction to the active function.
func (c *Context) VSTMXCSR(m operand.Op) {
	if inst, err := x86.VSTMXCSR(m); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSTMXCSR: Store MXCSR Register State.
//
// Forms:
//
// VSTMXCSR m32
// Construct and append a VSTMXCSR instruction to the active function.
// Operates on the global context.
func VSTMXCSR(m operand.Op) { ctx.VSTMXCSR(m) }

// VSUBPD: Subtract Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VSUBPD xmm xmm xmm
// VSUBPD m128 xmm xmm
// VSUBPD ymm ymm ymm
// VSUBPD m256 ymm ymm
// Construct and append a VSUBPD instruction to the active function.
func (c *Context) VSUBPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VSUBPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSUBPD: Subtract Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VSUBPD xmm xmm xmm
// VSUBPD m128 xmm xmm
// VSUBPD ymm ymm ymm
// VSUBPD m256 ymm ymm
// Construct and append a VSUBPD instruction to the active function.
// Operates on the global context.
func VSUBPD(mxy, xy, xy1 operand.Op) { ctx.VSUBPD(mxy, xy, xy1) }

// VSUBPS: Subtract Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VSUBPS xmm xmm xmm
// VSUBPS m128 xmm xmm
// VSUBPS ymm ymm ymm
// VSUBPS m256 ymm ymm
// Construct and append a VSUBPS instruction to the active function.
func (c *Context) VSUBPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VSUBPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSUBPS: Subtract Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VSUBPS xmm xmm xmm
// VSUBPS m128 xmm xmm
// VSUBPS ymm ymm ymm
// VSUBPS m256 ymm ymm
// Construct and append a VSUBPS instruction to the active function.
// Operates on the global context.
func VSUBPS(mxy, xy, xy1 operand.Op) { ctx.VSUBPS(mxy, xy, xy1) }

// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VSUBSD xmm xmm xmm
// VSUBSD m64 xmm xmm
// Construct and append a VSUBSD instruction to the active function.
func (c *Context) VSUBSD(mx, x, x1 operand.Op) {
	if inst, err := x86.VSUBSD(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VSUBSD xmm xmm xmm
// VSUBSD m64 xmm xmm
// Construct and append a VSUBSD instruction to the active function.
// Operates on the global context.
func VSUBSD(mx, x, x1 operand.Op) { ctx.VSUBSD(mx, x, x1) }

// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VSUBSS xmm xmm xmm
// VSUBSS m32 xmm xmm
// Construct and append a VSUBSS instruction to the active function.
func (c *Context) VSUBSS(mx, x, x1 operand.Op) {
	if inst, err := x86.VSUBSS(mx, x, x1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VSUBSS xmm xmm xmm
// VSUBSS m32 xmm xmm
// Construct and append a VSUBSS instruction to the active function.
// Operates on the global context.
func VSUBSS(mx, x, x1 operand.Op) { ctx.VSUBSS(mx, x, x1) }

// VTESTPD: Packed Double-Precision Floating-Point Bit Test.
//
// Forms:
//
// VTESTPD xmm xmm
// VTESTPD m128 xmm
// VTESTPD ymm ymm
// VTESTPD m256 ymm
// Construct and append a VTESTPD instruction to the active function.
func (c *Context) VTESTPD(mxy, xy operand.Op) {
	if inst, err := x86.VTESTPD(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VTESTPD: Packed Double-Precision Floating-Point Bit Test.
//
// Forms:
//
// VTESTPD xmm xmm
// VTESTPD m128 xmm
// VTESTPD ymm ymm
// VTESTPD m256 ymm
// Construct and append a VTESTPD instruction to the active function.
// Operates on the global context.
func VTESTPD(mxy, xy operand.Op) { ctx.VTESTPD(mxy, xy) }

// VTESTPS: Packed Single-Precision Floating-Point Bit Test.
//
// Forms:
//
// VTESTPS xmm xmm
// VTESTPS m128 xmm
// VTESTPS ymm ymm
// VTESTPS m256 ymm
// Construct and append a VTESTPS instruction to the active function.
func (c *Context) VTESTPS(mxy, xy operand.Op) {
	if inst, err := x86.VTESTPS(mxy, xy); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VTESTPS: Packed Single-Precision Floating-Point Bit Test.
//
// Forms:
//
// VTESTPS xmm xmm
// VTESTPS m128 xmm
// VTESTPS ymm ymm
// VTESTPS m256 ymm
// Construct and append a VTESTPS instruction to the active function.
// Operates on the global context.
func VTESTPS(mxy, xy operand.Op) { ctx.VTESTPS(mxy, xy) }

// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// VUCOMISD xmm xmm
// VUCOMISD m64 xmm
// Construct and append a VUCOMISD instruction to the active function.
func (c *Context) VUCOMISD(mx, x operand.Op) {
	if inst, err := x86.VUCOMISD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// VUCOMISD xmm xmm
// VUCOMISD m64 xmm
// Construct and append a VUCOMISD instruction to the active function.
// Operates on the global context.
func VUCOMISD(mx, x operand.Op) { ctx.VUCOMISD(mx, x) }

// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// VUCOMISS xmm xmm
// VUCOMISS m32 xmm
// Construct and append a VUCOMISS instruction to the active function.
func (c *Context) VUCOMISS(mx, x operand.Op) {
	if inst, err := x86.VUCOMISS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
// VUCOMISS xmm xmm
// VUCOMISS m32 xmm
// Construct and append a VUCOMISS instruction to the active function.
// Operates on the global context.
func VUCOMISS(mx, x operand.Op) { ctx.VUCOMISS(mx, x) }

// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKHPD xmm xmm xmm
// VUNPCKHPD m128 xmm xmm
// VUNPCKHPD ymm ymm ymm
// VUNPCKHPD m256 ymm ymm
// Construct and append a VUNPCKHPD instruction to the active function.
func (c *Context) VUNPCKHPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VUNPCKHPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKHPD xmm xmm xmm
// VUNPCKHPD m128 xmm xmm
// VUNPCKHPD ymm ymm ymm
// VUNPCKHPD m256 ymm ymm
// Construct and append a VUNPCKHPD instruction to the active function.
// Operates on the global context.
func VUNPCKHPD(mxy, xy, xy1 operand.Op) { ctx.VUNPCKHPD(mxy, xy, xy1) }

// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKHPS xmm xmm xmm
// VUNPCKHPS m128 xmm xmm
// VUNPCKHPS ymm ymm ymm
// VUNPCKHPS m256 ymm ymm
// Construct and append a VUNPCKHPS instruction to the active function.
func (c *Context) VUNPCKHPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VUNPCKHPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKHPS xmm xmm xmm
// VUNPCKHPS m128 xmm xmm
// VUNPCKHPS ymm ymm ymm
// VUNPCKHPS m256 ymm ymm
// Construct and append a VUNPCKHPS instruction to the active function.
// Operates on the global context.
func VUNPCKHPS(mxy, xy, xy1 operand.Op) { ctx.VUNPCKHPS(mxy, xy, xy1) }

// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKLPD xmm xmm xmm
// VUNPCKLPD m128 xmm xmm
// VUNPCKLPD ymm ymm ymm
// VUNPCKLPD m256 ymm ymm
// Construct and append a VUNPCKLPD instruction to the active function.
func (c *Context) VUNPCKLPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VUNPCKLPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKLPD xmm xmm xmm
// VUNPCKLPD m128 xmm xmm
// VUNPCKLPD ymm ymm ymm
// VUNPCKLPD m256 ymm ymm
// Construct and append a VUNPCKLPD instruction to the active function.
// Operates on the global context.
func VUNPCKLPD(mxy, xy, xy1 operand.Op) { ctx.VUNPCKLPD(mxy, xy, xy1) }

// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKLPS xmm xmm xmm
// VUNPCKLPS m128 xmm xmm
// VUNPCKLPS ymm ymm ymm
// VUNPCKLPS m256 ymm ymm
// Construct and append a VUNPCKLPS instruction to the active function.
func (c *Context) VUNPCKLPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VUNPCKLPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VUNPCKLPS xmm xmm xmm
// VUNPCKLPS m128 xmm xmm
// VUNPCKLPS ymm ymm ymm
// VUNPCKLPS m256 ymm ymm
// Construct and append a VUNPCKLPS instruction to the active function.
// Operates on the global context.
func VUNPCKLPS(mxy, xy, xy1 operand.Op) { ctx.VUNPCKLPS(mxy, xy, xy1) }

// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
// VXORPD xmm xmm xmm
// VXORPD m128 xmm xmm
// VXORPD ymm ymm ymm
// VXORPD m256 ymm ymm
// Construct and append a VXORPD instruction to the active function.
func (c *Context) VXORPD(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VXORPD(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
// VXORPD xmm xmm xmm
// VXORPD m128 xmm xmm
// VXORPD ymm ymm ymm
// VXORPD m256 ymm ymm
// Construct and append a VXORPD instruction to the active function.
// Operates on the global context.
func VXORPD(mxy, xy, xy1 operand.Op) { ctx.VXORPD(mxy, xy, xy1) }

// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
// VXORPS xmm xmm xmm
// VXORPS m128 xmm xmm
// VXORPS ymm ymm ymm
// VXORPS m256 ymm ymm
// Construct and append a VXORPS instruction to the active function.
func (c *Context) VXORPS(mxy, xy, xy1 operand.Op) {
	if inst, err := x86.VXORPS(mxy, xy, xy1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
// VXORPS xmm xmm xmm
// VXORPS m128 xmm xmm
// VXORPS ymm ymm ymm
// VXORPS m256 ymm ymm
// Construct and append a VXORPS instruction to the active function.
// Operates on the global context.
func VXORPS(mxy, xy, xy1 operand.Op) { ctx.VXORPS(mxy, xy, xy1) }

// VZEROALL: Zero All YMM Registers.
//
// Forms:
//
// VZEROALL
// Construct and append a VZEROALL instruction to the active function.
func (c *Context) VZEROALL() {
	if inst, err := x86.VZEROALL(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VZEROALL: Zero All YMM Registers.
//
// Forms:
//
// VZEROALL
// Construct and append a VZEROALL instruction to the active function.
// Operates on the global context.
func VZEROALL() { ctx.VZEROALL() }

// VZEROUPPER: Zero Upper Bits of YMM Registers.
//
// Forms:
//
// VZEROUPPER
// Construct and append a VZEROUPPER instruction to the active function.
func (c *Context) VZEROUPPER() {
	if inst, err := x86.VZEROUPPER(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// VZEROUPPER: Zero Upper Bits of YMM Registers.
//
// Forms:
//
// VZEROUPPER
// Construct and append a VZEROUPPER instruction to the active function.
// Operates on the global context.
func VZEROUPPER() { ctx.VZEROUPPER() }
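
// Editorial note, not generated text: VEX-encoded (AVX) code that may be followed
// by legacy SSE code conventionally executes VZEROUPPER before returning, to avoid
// AVX/SSE transition penalties. A hedged sketch of the usual generator epilogue
// (RET is assumed to be available as a builder in this package):
//
//	VZEROUPPER()
//	RET()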

// XADDB: Exchange and Add.
//
// Forms:
//
// XADDB r8 r8
// XADDB r8 m8
// Construct and append a XADDB instruction to the active function.
func (c *Context) XADDB(r, mr operand.Op) {
	if inst, err := x86.XADDB(r, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XADDB: Exchange and Add.
//
// Forms:
//
// XADDB r8 r8
// XADDB r8 m8
// Construct and append a XADDB instruction to the active function.
// Operates on the global context.
func XADDB(r, mr operand.Op) { ctx.XADDB(r, mr) }

// XADDL: Exchange and Add.
//
// Forms:
//
// XADDL r32 r32
// XADDL r32 m32
// Construct and append a XADDL instruction to the active function.
func (c *Context) XADDL(r, mr operand.Op) {
	if inst, err := x86.XADDL(r, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XADDL: Exchange and Add.
//
// Forms:
//
// XADDL r32 r32
// XADDL r32 m32
// Construct and append a XADDL instruction to the active function.
// Operates on the global context.
func XADDL(r, mr operand.Op) { ctx.XADDL(r, mr) }

// XADDQ: Exchange and Add.
//
// Forms:
//
// XADDQ r64 r64
// XADDQ r64 m64
// Construct and append a XADDQ instruction to the active function.
func (c *Context) XADDQ(r, mr operand.Op) {
	if inst, err := x86.XADDQ(r, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XADDQ: Exchange and Add.
//
// Forms:
//
// XADDQ r64 r64
// XADDQ r64 m64
// Construct and append a XADDQ instruction to the active function.
// Operates on the global context.
func XADDQ(r, mr operand.Op) { ctx.XADDQ(r, mr) }

// XADDW: Exchange and Add.
//
// Forms:
//
// XADDW r16 r16
// XADDW r16 m16
// Construct and append a XADDW instruction to the active function.
func (c *Context) XADDW(r, mr operand.Op) {
	if inst, err := x86.XADDW(r, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XADDW: Exchange and Add.
//
// Forms:
//
// XADDW r16 r16
// XADDW r16 m16
// Construct and append a XADDW instruction to the active function.
// Operates on the global context.
func XADDW(r, mr operand.Op) { ctx.XADDW(r, mr) }

// XCHGB: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGB r8 r8
// XCHGB m8 r8
// XCHGB r8 m8
// Construct and append a XCHGB instruction to the active function.
func (c *Context) XCHGB(mr, mr1 operand.Op) {
	if inst, err := x86.XCHGB(mr, mr1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XCHGB: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGB r8 r8
// XCHGB m8 r8
// XCHGB r8 m8
// Construct and append a XCHGB instruction to the active function.
// Operates on the global context.
func XCHGB(mr, mr1 operand.Op) { ctx.XCHGB(mr, mr1) }

// XCHGL: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGL r32 eax
// XCHGL eax r32
// XCHGL r32 r32
// XCHGL m32 r32
// XCHGL r32 m32
// Construct and append a XCHGL instruction to the active function.
func (c *Context) XCHGL(emr, emr1 operand.Op) {
	if inst, err := x86.XCHGL(emr, emr1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XCHGL: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGL r32 eax
// XCHGL eax r32
// XCHGL r32 r32
// XCHGL m32 r32
// XCHGL r32 m32
// Construct and append a XCHGL instruction to the active function.
// Operates on the global context.
func XCHGL(emr, emr1 operand.Op) { ctx.XCHGL(emr, emr1) }

// XCHGQ: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGQ r64 rax
// XCHGQ rax r64
// XCHGQ r64 r64
// XCHGQ m64 r64
// XCHGQ r64 m64
// Construct and append a XCHGQ instruction to the active function.
func (c *Context) XCHGQ(mr, mr1 operand.Op) {
	if inst, err := x86.XCHGQ(mr, mr1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XCHGQ: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGQ r64 rax
// XCHGQ rax r64
// XCHGQ r64 r64
// XCHGQ m64 r64
// XCHGQ r64 m64
// Construct and append a XCHGQ instruction to the active function.
// Operates on the global context.
func XCHGQ(mr, mr1 operand.Op) { ctx.XCHGQ(mr, mr1) }

// XCHGW: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGW r16 ax
// XCHGW ax r16
// XCHGW r16 r16
// XCHGW m16 r16
// XCHGW r16 m16
// Construct and append a XCHGW instruction to the active function.
func (c *Context) XCHGW(amr, amr1 operand.Op) {
	if inst, err := x86.XCHGW(amr, amr1); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XCHGW: Exchange Register/Memory with Register.
//
// Forms:
//
// XCHGW r16 ax
// XCHGW ax r16
// XCHGW r16 r16
// XCHGW m16 r16
// XCHGW r16 m16
// Construct and append a XCHGW instruction to the active function.
// Operates on the global context.
func XCHGW(amr, amr1 operand.Op) { ctx.XCHGW(amr, amr1) }

// XGETBV: Get Value of Extended Control Register.
//
// Forms:
//
// XGETBV
// Construct and append a XGETBV instruction to the active function.
func (c *Context) XGETBV() {
	if inst, err := x86.XGETBV(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XGETBV: Get Value of Extended Control Register.
//
// Forms:
//
// XGETBV
// Construct and append a XGETBV instruction to the active function.
// Operates on the global context.
func XGETBV() { ctx.XGETBV() }

// XLAT: Table Look-up Translation.
//
// Forms:
//
// XLAT
// Construct and append a XLAT instruction to the active function.
func (c *Context) XLAT() {
	if inst, err := x86.XLAT(); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XLAT: Table Look-up Translation.
//
// Forms:
//
// XLAT
// Construct and append a XLAT instruction to the active function.
// Operates on the global context.
func XLAT() { ctx.XLAT() }

// XORB: Logical Exclusive OR.
//
// Forms:
//
// XORB imm8 al
// XORB imm8 r8
// XORB r8 r8
// XORB m8 r8
// XORB imm8 m8
// XORB r8 m8
// Construct and append a XORB instruction to the active function.
func (c *Context) XORB(imr, amr operand.Op) {
	if inst, err := x86.XORB(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XORB: Logical Exclusive OR.
//
// Forms:
//
// XORB imm8 al
// XORB imm8 r8
// XORB r8 r8
// XORB m8 r8
// XORB imm8 m8
// XORB r8 m8
// Construct and append a XORB instruction to the active function.
// Operates on the global context.
func XORB(imr, amr operand.Op) { ctx.XORB(imr, amr) }

// XORL: Logical Exclusive OR.
//
// Forms:
//
// XORL imm32 eax
// XORL imm8 r32
// XORL imm32 r32
// XORL r32 r32
// XORL m32 r32
// XORL imm8 m32
// XORL imm32 m32
// XORL r32 m32
// Construct and append a XORL instruction to the active function.
func (c *Context) XORL(imr, emr operand.Op) {
	if inst, err := x86.XORL(imr, emr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XORL: Logical Exclusive OR.
//
// Forms:
//
// XORL imm32 eax
// XORL imm8 r32
// XORL imm32 r32
// XORL r32 r32
// XORL m32 r32
// XORL imm8 m32
// XORL imm32 m32
// XORL r32 m32
// Construct and append a XORL instruction to the active function.
// Operates on the global context.
func XORL(imr, emr operand.Op) { ctx.XORL(imr, emr) }

// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
// XORPD xmm xmm
// XORPD m128 xmm
// Construct and append a XORPD instruction to the active function.
func (c *Context) XORPD(mx, x operand.Op) {
	if inst, err := x86.XORPD(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
// XORPD xmm xmm
// XORPD m128 xmm
// Construct and append a XORPD instruction to the active function.
// Operates on the global context.
func XORPD(mx, x operand.Op) { ctx.XORPD(mx, x) }

// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
// XORPS xmm xmm
// XORPS m128 xmm
// Construct and append a XORPS instruction to the active function.
func (c *Context) XORPS(mx, x operand.Op) {
	if inst, err := x86.XORPS(mx, x); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
// XORPS xmm xmm
// XORPS m128 xmm
// Construct and append a XORPS instruction to the active function.
// Operates on the global context.
func XORPS(mx, x operand.Op) { ctx.XORPS(mx, x) }

// XORQ: Logical Exclusive OR.
//
// Forms:
//
// XORQ imm32 rax
// XORQ imm8 r64
// XORQ imm32 r64
// XORQ r64 r64
// XORQ m64 r64
// XORQ imm8 m64
// XORQ imm32 m64
// XORQ r64 m64
// Construct and append a XORQ instruction to the active function.
func (c *Context) XORQ(imr, mr operand.Op) {
	if inst, err := x86.XORQ(imr, mr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XORQ: Logical Exclusive OR.
//
// Forms:
//
// XORQ imm32 rax
// XORQ imm8 r64
// XORQ imm32 r64
// XORQ r64 r64
// XORQ m64 r64
// XORQ imm8 m64
// XORQ imm32 m64
// XORQ r64 m64
// Construct and append a XORQ instruction to the active function.
// Operates on the global context.
func XORQ(imr, mr operand.Op) { ctx.XORQ(imr, mr) }
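
// Editorial sketch: XOR of a register with itself is the usual way to zero a
// 64-bit register in generated code (shorter encoding than moving an immediate
// zero). GP64 is assumed to be avo's 64-bit virtual-register allocator.
//
//	acc := GP64()
//	XORQ(acc, acc)    // acc = 0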

// XORW: Logical Exclusive OR.
//
// Forms:
//
// XORW imm16 ax
// XORW imm8 r16
// XORW imm16 r16
// XORW r16 r16
// XORW m16 r16
// XORW imm8 m16
// XORW imm16 m16
// XORW r16 m16
// Construct and append a XORW instruction to the active function.
func (c *Context) XORW(imr, amr operand.Op) {
	if inst, err := x86.XORW(imr, amr); err == nil {
		c.Instruction(inst)
	} else {
		c.adderror(err)
	}
}

// XORW: Logical Exclusive OR.
//
// Forms:
//
// XORW imm16 ax
// XORW imm8 r16
// XORW imm16 r16
// XORW r16 r16
// XORW m16 r16
// XORW imm8 m16
// XORW imm16 m16
// XORW r16 m16
// Construct and append a XORW instruction to the active function.
// Operates on the global context.
func XORW(imr, amr operand.Op) { ctx.XORW(imr, amr) }