AMD64

Registers

Type Names
int64 %rdi, %rsi, %rdx, %rcx, %r8, %r9, %rax, %r10, %r11, %r12, %r13, %r14, %r15, %rbx, %rbp
int3232 %mmx0, %mmx1, %mmx2, %mmx3, %mmx4, %mmx5, %mmx6, %mmx7
int6464 %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15
float80 %st(0), %st(1), %st(2), %st(3), %st(4), %st(5), %st(6), %st(7)
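
A brief sketch (not part of the machine description; all names are made up): a variable declared with one of these register types is kept in one of the listed registers of that class.

   int64 a
   int6464 x
   float80 f

   a = 5
   a += 20
   x = 0
   f = 0

Here a lives in one of the int64 registers, x in an %xmm register, and f on the x87 stack.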

Caller registers (callee-save, non-volatile)

Type Names
int64 9, 10, 11, 12, 13, 14, 15
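
The names here are apparently qhasm's register numbers (the same numbers used by the assign rows in the arithmetic table below). A variable held in one of these registers must be preserved for the caller; a sketch of the usual pattern, with made-up names, using the caller directive from the misc table and a stack64 slot to save the value on entry and restore it before returning:

   int64 caller_save
   caller caller_save
   stack64 caller_save_stack

   caller_save_stack = caller_save
   ...
   caller_save = caller_save_stack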

Supported stack types

Type Number of bytes
stack32 4
stack64 8
stack128 16
stack256 32
stack512 64
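
Stack variables live in the function's stack frame. A sketch with made-up names: spilling an int6464 value to a stack128 slot, and addressing a stack512 buffer through a pointer (both forms appear in the load/store table below):

   int6464 x
   stack128 x_spill
   stack512 table
   int64 tablep

   x_spill = x
   tablep = &table
   x = *(int128 *) (tablep + 16)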

Supported integer operations

Supported integer load/store operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
r = *(uint8 *) (s + n) int64 s, immediate n int64 r movzbq n(s),r
r = *(uint16 *) (s + n) int64 s, immediate n int64 r movzwq n(s),r
r = *(uint32 *) (s + n) int64 s, immediate n int64 r movl n(s),r%32
r = *(uint64 *) (s + n) int64 s, immediate n int64 r movq n(s),r
r = *( int8 *) (s + n) int64 s, immediate n int64 r movsbq n(s),r
r = *( int16 *) (s + n) int64 s, immediate n int64 r movswq n(s),r
r = *( int32 *) (s + n) int64 s, immediate n int64 r movslq n(s),r
r = *( int64 *) (s + n) int64 s, immediate n int64 r movq n(s),r
r = *(uint8 *) (s + t) int64 s, int64 t int64 r movzbq (s,t),r
r = *(uint16 *) (s + t) int64 s, int64 t int64 r movzwq (s,t),r
r = *(uint32 *) (s + t) int64 s, int64 t int64 r movl (s,t),r%32
r = *(uint64 *) (s + t) int64 s, int64 t int64 r movq (s,t),r
r = *( int8 *) (s + t) int64 s, int64 t int64 r movsbq (s,t),r
r = *( int16 *) (s + t) int64 s, int64 t int64 r movswq (s,t),r
r = *( int32 *) (s + t) int64 s, int64 t int64 r movslq (s,t),r
r = *( int64 *) (s + t) int64 s, int64 t int64 r movq (s,t),r
r = *(uint8 *) (s + t * 8) int64 s, int64 t int64 r movzbq (s,t,8),r
r = *(uint16 *) (s + t * 8) int64 s, int64 t int64 r movzwq (s,t,8),r
r = *(uint32 *) (s + t * 8) int64 s, int64 t int64 r movl (s,t,8),r%32
r = *(uint64 *) (s + t * 8) int64 s, int64 t int64 r movq (s,t,8),r
r = *( int8 *) (s + t * 8) int64 s, int64 t int64 r movsbq (s,t,8),r
r = *( int16 *) (s + t * 8) int64 s, int64 t int64 r movswq (s,t,8),r
r = *( int32 *) (s + t * 8) int64 s, int64 t int64 r movslq (s,t,8),r
r = *( int64 *) (s + t * 8) int64 s, int64 t int64 r movq (s,t,8),r
r = *(uint8 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movzbq n(s,t,8),r
r = *(uint16 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movzwq n(s,t,8),r
r = *(uint32 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movl n(s,t,8),r%32
r = *(uint64 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movq n(s,t,8),r
r = *( int8 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movsbq n(s,t,8),r
r = *( int16 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movswq n(s,t,8),r
r = *( int32 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movslq n(s,t,8),r
r = *( int64 *) (s + n + t * 8) int64 s, int64 t, immediate n int64 r movq n(s,t,8),r
(uint32) r += *(uint32 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned<, carry addl n(s),r%32
(uint32) r += *(uint32 *) (s + n) + carry int64 r, int64 s, immediate n carry int64 r =, unsigned>, unsigned<, carry adcl n(s),r%32
(uint32) r -= *(uint32 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< subl n(s),r%32
(uint32) r &= *(uint32 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< andl n(s),r%32
(uint32) r |= *(uint32 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< orl n(s),r%32
(uint32) r ^= *(uint32 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< xorl n(s),r%32
(uint32) r += *(uint32 *) (s + n + t * 8) int64 r, int64 s, int64 t, immediate n int64 r =, unsigned>, unsigned< addl n(s,t,8),r%32
(uint32) r -= *(uint32 *) (s + n + t * 8) int64 r, int64 s, int64 t, immediate n int64 r =, unsigned>, unsigned< subl n(s,t,8),r%32
(uint32) r &= *(uint32 *) (s + n + t * 8) int64 r, int64 s, int64 t, immediate n int64 r =, unsigned>, unsigned< andl n(s,t,8),r%32
(uint32) r |= *(uint32 *) (s + n + t * 8) int64 r, int64 s, int64 t, immediate n int64 r =, unsigned>, unsigned< orl n(s,t,8),r%32
(uint32) r ^= *(uint32 *) (s + n + t * 8) int64 r, int64 s, int64 t, immediate n int64 r =, unsigned>, unsigned< xorl n(s,t,8),r%32
(uint32) r += *(uint32 *) (s + t * 8) int64 r, int64 s, int64 t int64 r =, unsigned>, unsigned< addl (s,t,8),r%32
(uint32) r -= *(uint32 *) (s + t * 8) int64 r, int64 s, int64 t int64 r =, unsigned>, unsigned< subl (s,t,8),r%32
(uint32) r &= *(uint32 *) (s + t * 8) int64 r, int64 s, int64 t int64 r =, unsigned>, unsigned< andl (s,t,8),r%32
(uint32) r |= *(uint32 *) (s + t * 8) int64 r, int64 s, int64 t int64 r =, unsigned>, unsigned< orl (s,t,8),r%32
(uint32) r ^= *(uint32 *) (s + t * 8) int64 r, int64 s, int64 t int64 r =, unsigned>, unsigned< xorl (s,t,8),r%32
r += *(uint64 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned<, carry addq n(s),r
r += *(uint64 *) (s + n) + carry int64 r, int64 s, immediate n carry int64 r =, unsigned>, unsigned<, carry adcq n(s),r
r -= *(uint64 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< subq n(s),r
r &= *(uint64 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< andq n(s),r
r |= *(uint64 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< orq n(s),r
r ^= *(uint64 *) (s + n) int64 r, int64 s, immediate n int64 r =, unsigned>, unsigned< xorq n(s),r
r = s stack64 s int64 r movq s,r
r = bottom s stack64 s int64 r movl s,r%32
r = top s stack64 s int64 r movl !shift4s,r%32
r += s int64 r, stack64 s int64 r =, unsigned>, unsigned< addq s,r
r += s + carry int64 r, stack64 s int64 r =, unsigned>, unsigned< adcq s,r
r -= s int64 r, stack64 s int64 r =, unsigned>, unsigned< subq s,r
r &= s int64 r, stack64 s int64 r =, unsigned>, unsigned< andq s,r
r |= s int64 r, stack64 s int64 r =, unsigned>, unsigned< orq s,r
r ^= s int64 r, stack64 s int64 r =, unsigned>, unsigned< xorq s,r
(uint32) r += s int64 r, stack64 s int64 r =, unsigned>, unsigned< addl s,r%32
r = &s stack128 s int64 r, stack128 s leaq s,r
r = ((uint32 *)&s)[0] stack128 s int64 r movl s,r%32
r = ((uint32 *)&s)[1] stack128 s int64 r movl 4+s,r%32
r = ((uint32 *)&s)[2] stack128 s int64 r movl 8+s,r%32
r = ((uint32 *)&s)[3] stack128 s int64 r movl 12+s,r%32
r = s stack128 s int6464 r movdqa s,r
r = *(int128 *) (s + n) int64 s, immediate n int6464 r movdqa n(s),r
int32323232 r += s int6464 r, stack128 s int6464 r paddd s,r
uint32323232 r += s int6464 r, stack128 s int6464 r paddd s,r
int32323232 r -= s int6464 r, stack128 s int6464 r psubd s,r
uint32323232 r -= s int6464 r, stack128 s int6464 r psubd s,r
uint32323232 r += *(int128 *) (s + n) int6464 r, int64 s, immediate n int6464 r paddd n(s),r
r = &s stack512 s int64 r, stack512 s leaq s,r
*(uint8 *) (s + n) = r int64 r, int64 s, immediate n movb r%8,n(s)
*(uint16 *) (s + n) = r int64 r, int64 s, immediate n movw r%16,n(s)
*(uint32 *) (s + n) = r int64 r, int64 s, immediate n movl r%32,n(s)
*(uint64 *) (s + n) = r int64 r, int64 s, immediate n movq r,n(s)
*( int8 *) (s + n) = r int64 r, int64 s, immediate n movb r%8,n(s)
*( int16 *) (s + n) = r int64 r, int64 s, immediate n movw r%16,n(s)
*( int32 *) (s + n) = r int64 r, int64 s, immediate n movl r%32,n(s)
*( int64 *) (s + n) = r int64 r, int64 s, immediate n movq r,n(s)
*(uint8 *) (s + n) = m int64 s, immediate m, immediate n movb $m,n(s)
*(uint16 *) (s + n) = m int64 s, immediate m, immediate n movw $m,n(s)
*(uint32 *) (s + n) = m int64 s, immediate m, immediate n movl $m,n(s)
*(uint64 *) (s + n) = m int64 s, immediate m, immediate n movq $m,n(s)
*( int8 *) (s + n) = m int64 s, immediate m, immediate n movb $m,n(s)
*( int16 *) (s + n) = m int64 s, immediate m, immediate n movw $m,n(s)
*( int32 *) (s + n) = m int64 s, immediate m, immediate n movl $m,n(s)
*( int64 *) (s + n) = m int64 s, immediate m, immediate n movq $m,n(s)
*(uint8 *) (s + t) = r int64 r, int64 s, int64 t movb r%8,(s,t)
*(uint16 *) (s + t) = r int64 r, int64 s, int64 t movw r%16,(s,t)
*(uint32 *) (s + t) = r int64 r, int64 s, int64 t movl r%32,(s,t)
*(uint64 *) (s + t) = r int64 r, int64 s, int64 t movq r,(s,t)
*( int8 *) (s + t) = r int64 r, int64 s, int64 t movb r%8,(s,t)
*( int16 *) (s + t) = r int64 r, int64 s, int64 t movw r%16,(s,t)
*( int32 *) (s + t) = r int64 r, int64 s, int64 t movl r%32,(s,t)
*( int64 *) (s + t) = r int64 r, int64 s, int64 t movq r,(s,t)
r = s int64 s stack64 r movq s,r
inplace r bottom = s int64 s, stack64 r stack64 r movl s%32,r
((uint32 *)&r)[0] = 0 stack128 r movl $0,r
((uint32 *)&r)[1] = 0 stack128 r stack128 r movl $0,4+r
((uint32 *)&r)[2] = 0 stack128 r stack128 r movl $0,8+r
((uint32 *)&r)[3] = 0 stack128 r stack128 r movl $0,12+r
r = s int6464 s stack128 r movdqa s,r
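
A sketch, with made-up names, combining a few of the forms above: zero- and sign-extending loads, scaled indexing, and plain stores.

   int64 src
   int64 dst
   int64 i
   int64 a
   int64 b

   a = *(uint8 *) (src + 0)
   b = *( int32 *) (src + 4)
   a = *(uint64 *) (src + i * 8)
   b = *(uint32 *) (src + 4 + i * 8)
   *(uint64 *) (dst + 0) = a
   *(uint32 *) (dst + i) = b

Note that the stores listed above take only (s + n) or (s + t) addresses; no store form with both a displacement and a scaled index appears in the table.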

Supported integer arithmetic operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
r = s int64 s int64 r mov s,r
r = s int3232 s int64 r movd s,r
r = s int64 s int3232 r movd s,r
r = s int6464 s int64 r movd s,r
r = s int64 s int6464 r movd s,r
r = (s >> 8) & 255 movzbl s%next8,r%32
(int128) t r = r * s int64 s =, unsigned>, unsigned< imul s
(uint128) t r = r * s int64 s =, unsigned>, unsigned< mul s
r &= s int64 r, int64 s int64 r =, unsigned>, unsigned< and s,r
r &= s int6464 r, int6464 s int6464 r pand s,r
r = s & 255 int64 s int64 r movzbl s%8,r%32
r = &n immediate n int64 r lea n(%rip),r
(uint32) r &= n int64 r, immediate n int64 r =, unsigned>, unsigned< and $n,r%32
r *= s int64 r, int64 s int64 r =, unsigned>, unsigned< imul s,r
r = s * n int64 s, immediate n int64 r =, unsigned>, unsigned< imul s,n,r
r += s int64 r, int64 s int64 r =, unsigned>, unsigned<, carry add s,r
r = s + t int64 s, int64 t int64 r lea (s,t),r
(uint32) r += s int64 r, int64 s int64 r =, unsigned>, unsigned<, carry add s%32,r%32
int32323232 r += s int6464 r, int6464 s int6464 r paddd s,r
uint32323232 r += s int6464 r, int6464 s int6464 r paddd s,r
r += s + carry int64 r, int64 s carry int64 r =, unsigned>, unsigned<, carry adc s,r
(uint32) r += s + carry int64 r, int64 s carry int64 r =, unsigned>, unsigned<, carry adc s%32,r%32
r = s + t + n int64 s, int64 t, immediate n int64 r lea n(s,t),r
r += t + n int64 r, int64 t, immediate n int64 r lea n(r,t),r
r += n int64 r, immediate n int64 r =, unsigned>, unsigned<, carry add $n,r
r += n + carry int64 r, immediate n carry int64 r =, unsigned>, unsigned<, carry adc $n,r
r = -r int64 r int64 r =, unsigned>, unsigned< neg r
r -= s int64 r, int64 s int64 r =, unsigned>, unsigned< sub s,r
r - s int64 r, int64 s =, unsigned>, unsigned< cmp s,r
(uint32) r -= s int64 r, int64 s int64 r =, unsigned>, unsigned< sub s%32,r%32
int32323232 r -= s int6464 r, int6464 s int6464 r psubd s,r
uint32323232 r -= s int6464 r, int6464 s int6464 r psubd s,r
r = -n immediate n int64 r mov $-n,r
r -= n int64 r, immediate n int64 r =, unsigned>, unsigned< sub $n,r
r - n int64 r, immediate n =, unsigned>, unsigned< cmp $n,r
r = (r.t) << s int64 r, int64 t int64 r =, unsigned>, unsigned< shld %cl,t,r
r = (r.t) << n int64 r, int64 t, immediate n int64 r =, unsigned>, unsigned< shld $n,t,r
r = 0 int6464 r pxor r,r
r <<= s int64 r int64 r =, unsigned>, unsigned< shl %cl,r
r <<<= s int64 r int64 r =, unsigned>, unsigned< rol %cl,r
r <<<= 0 int6464 r int6464 r pshufd $0xe4,r,r
r = s <<< 0 int6464 s int6464 r pshufd $0xe4,s,r
r <<<= 32 int6464 r int6464 r pshufd $0x93,r,r
r = s <<< 32 int6464 s int6464 r pshufd $0x93,s,r
r <<<= 64 int6464 r int6464 r pshufd $0x4e,r,r
r = s <<< 64 int6464 s int6464 r pshufd $0x4e,s,r
r <<<= 96 int6464 r int6464 r pshufd $0x39,r,r
r = s <<< 96 int6464 s int6464 r pshufd $0x39,s,r
r <<<= n int64 r, immediate n int64 r =, unsigned>, unsigned< rol $n,r
(uint32) r <<<= n int64 r, immediate n int64 r =, unsigned>, unsigned< rol $n,r%32
r <<= n int64 r, immediate n int64 r =, unsigned>, unsigned< shl $n,r
(uint32) r <<= n int64 r, immediate n int64 r =, unsigned>, unsigned< shl $n,r%32
int32323232 r <<= n int6464 r, immediate n int6464 r pslld $n,r
uint32323232 r <<= n int6464 r, immediate n int6464 r pslld $n,r
(int64) r >>= s int64 r int64 r =, unsigned>, unsigned< sar %cl,r
(uint64) r >>= s int64 r int64 r =, unsigned>, unsigned< shr %cl,r
r = (t r) >> s int64 r, int64 t int64 r =, unsigned>, unsigned< shrd %cl,t,r
r >>>= s int64 r int64 r =, unsigned>, unsigned< ror %cl,r
r >>>= n int64 r, immediate n int64 r =, unsigned>, unsigned< ror $n,r
(uint32) r >>>= n int64 r, immediate n int64 r =, unsigned>, unsigned< ror $n,r%32
(int64) r >>= n int64 r, immediate n int64 r =, unsigned>, unsigned< sar $n,r
(uint64) r >>= n int64 r, immediate n int64 r =, unsigned>, unsigned< shr $n,r
(uint32) r >>= n int64 r, immediate n int64 r =, unsigned>, unsigned< shr $n,r%32
(int32) r >>= n int64 r, immediate n int64 r =, unsigned>, unsigned< sar $n,r%32
r = (t r) >> n int64 r, int64 t, immediate n int64 r =, unsigned>, unsigned< shrd $n,t,r
int32323232 r >>= n int6464 r, immediate n int6464 r psrad $n,r
uint32323232 r >>= n int6464 r, immediate n int6464 r psrld $n,r
r ^= s int64 r, int64 s int64 r =, unsigned>, unsigned< xor s,r
r ^= s int6464 r, int6464 s int6464 r pxor s,r
(uint32) r ^= -n int64 r, immediate n int64 r =, unsigned>, unsigned< xor $-n,r%32
(uint32) r ^= n int64 r, immediate n int64 r =, unsigned>, unsigned< xor $n,r%32
assign 10 to r
assign 11 to r
assign 12 to r
assign 13 to r
assign 14 to r
assign 15 to r
assign 15 to r = (s >> 8) & 255 movzbl s%next8,r%32
assign 1 to r
assign 1 to r = (s >> 8) & 255 movzbl s%next8,r%32
assign 2 to r
assign 2 to r = (s >> 8) & 255 movzbl s%next8,r%32
assign 3 to r
assign 4 to r
assign 5 to r
assign 6 to r
assign 7 to r
assign 8 to r
assign 9 to r
r = n immediate n int64 r mov $n,r
while (n) { *y++ = x; --n } rep stosb
while (n) { *y++ = *x++; --n } rep movsb
r |= s int64 r, int64 s int64 r =, unsigned>, unsigned< or s,r
r |= s int6464 r, int6464 s int6464 r por s,r
(uint32) r |= n int64 r, immediate n int64 r =, unsigned>, unsigned< or $n,r%32
r = ~r int64 r int64 r not r
r ~&= s int6464 r, int6464 s int6464 r pandn s,r
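
A sketch, with made-up names, of how these operations combine: an add-rotate-xor fragment followed by a widening multiply. The (uint128) form assembles to the implicit-operand mul instruction, whose 128-bit product lives in %rdx:%rax, which presumably pins the t r pair to those registers.

   int64 a
   int64 b
   int64 c
   int64 y
   int64 lo
   int64 hi

   a += b
   c ^= a
   (uint32) c <<<= 16
   b += c
   a ^= b
   a <<<= 13

   lo = a
   (uint128) hi lo = lo * y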

Supported floating point operations

Supported floating point load/store operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
r = *(float64 *) &n immediate n float80 r fldl n(%rip)
r = *(float64 *) (s + n) int64 s, immediate n float80 r fldl n(s)
r = *(int32 *) &n immediate n float80 r fildl n(%rip)
r = *(int32 *) (s + n) int64 s, immediate n float80 r fildl n(s)
r += *(float64 *) &n float80 r, immediate n float80 r faddl n(%rip)
r += *(float64 *) (s + n) float80 r, int64 s, immediate n float80 r faddl n(s)
r -= *(float64 *) &n float80 r, immediate n float80 r fsubl n(%rip)
r -= *(float64 *) (s + n) float80 r, int64 s, immediate n float80 r fsubl n(s)
r *= *(float64 *) &n float80 r, immediate n float80 r fmull n(%rip)
r *= *(float64 *) (s + n) float80 r, int64 s, immediate n float80 r fmull n(s)
r = *(float64 *) &s stack64 s float80 r fldl s
r += *(float64 *) &s float80 r, stack64 s float80 r faddl s
r -= *(float64 *) &s float80 r, stack64 s float80 r fsubl s
r *= *(float64 *) &s float80 r, stack64 s float80 r fmull s
*(float64 *) (s + n) = r float80 r, int64 s, immediate n fstpl n(s)!pop
*(float64 *) &s = r float80 r stack64 s fstpl s!pop
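
A sketch with made-up names: load two float64 words through a pointer, accumulate on the x87 stack, and store the result. The !pop annotations above reflect that the stores (fstpl) pop the value off the x87 stack.

   int64 p
   float80 f

   f = *(float64 *) (p + 0)
   f += *(float64 *) (p + 8)
   *(float64 *) (p + 16) = f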

Supported floating point arithmetic operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
r *= s float80 r, float80 s float80 r fmulp s,r!pop
r += s float80 r, float80 s float80 r faddp s,r!pop
r -= s float80 r, float80 s float80 r fsubrp s,r!pop!reverse
r -= s;negate float80 r, float80 s float80 r fsubp s,r!pop!reverse
r = 0 float80 r fldz
stackpop r float80 r fstp %st(0)
stacktop r float80 r fxch r
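
Register-to-register float80 operations work on the x87 stack and, per the !pop annotations, consume one operand; a sketch with made-up names, arranged so each popped value is at its last use:

   float80 x
   float80 y
   stack64 a
   stack64 b

   x = *(float64 *) &a
   x *= *(float64 *) &b
   y = *(float64 *) &b
   y += x
   *(float64 *) &a = y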

Supported misc operations

Supported misc misc operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
nop nop
f# immediate f ._f!:
emms emms
round *(uint16 *) &n immediate n fldcw n(%rip)
r top = n immediate n stack64 r movl $n,!shift4r
caller r
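
A sketch with made-up names; fpu_control_word stands for a 16-bit rounding-mode constant assumed to be defined elsewhere, and the top form appears to write the upper 32 bits of a stack64 slot:

   stack64 t

   round *(uint16 *) &fpu_control_word
   t top = 0
   emms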

Supported branch operations

Supported branch conditional operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
goto f if = immediate f = je ._f
goto f if != immediate f = jne ._f
goto f if unsigned> immediate f unsigned> ja ._f
goto f if !unsigned> immediate f unsigned> jbe ._f
goto f if unsigned< immediate f unsigned< jb ._f
goto f if !unsigned< immediate f unsigned< jae ._f

Supported branch unconditional operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
goto f immediate f jmp ._f
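
A sketch of a counted loop with made-up names, using the f# label form from the misc table together with the flags set by the subtraction:

   int64 i
   int64 acc

   acc = 0
   i = 10
   loop#
   acc += i
   i -= 1
   goto loop if !=

i -= 1 sets the = flag once i reaches zero, and goto loop if != evaluates it, so the body runs ten times.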

Supported declaration operations

Supported declaration misc operations

Qhasm Instruction Input Evaluated flags Output Set flags Assembly Code
float80 r
int3232 r
int6464 r
int64 r
stack128 r
stack256 r
stack32 r
stack512 r
stack64 r
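
Declarations normally sit at the top of a qhasm file, before the code that uses them. A closing sketch with made-up names; the enter and leave lines are the usual qhasm function brackets, which are not part of the tables on this page, and how xp gets bound to the function's argument is decided by the calling-convention side of the machine description, not shown here:

   int64 xp
   int64 a
   int64 b
   stack64 a_stack

   enter double_first_word

   a = *(uint64 *) (xp + 0)
   a_stack = a
   b = a_stack
   b += b
   *(uint64 *) (xp + 0) = b

   leave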