Dataset Viewer

| Column | Type | Length range |
|---|---|---|
| file | string | 6–20 |
| x86 | string | 902–260k |
| arm | string | 1.04k–269k |

Each row pairs an assembly file name (file) with its x86-64 listing (x86) and its AArch64 listing (arm).
file: natlog.s

x86:

.section __TEXT,__text,regular,pure_instructions
.build_version macos, 14, 0 sdk_version 15, 2
.section __TEXT,__literal8,8byte_literals
.p2align 3, 0x0 ## -- Begin function main
LCPI0_0:
.quad 0x3ff0000000000000 ## double 1
LCPI0_1:
.quad 0x3ff0000a7c5ac472 ## double 1.0000100000000001
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movsd LCPI0_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movl $-100000, %eax ## imm = 0xFFFE7960
movsd LCPI0_1(%rip), %xmm1 ## xmm1 = mem[0],zero
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
addl $10, %eax
jne LBB0_1
## %bb.2:
leaq L_.str(%rip), %rdi
movb $1, %al
callq _libmin_printf
callq _libmin_success
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "natlog: e=%f\n"
.subsections_via_symbols
arm:

.section __TEXT,__text,regular,pure_instructions
.build_version macos, 14, 0 sdk_version 15, 2
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
fmov d0, #1.00000000
mov w8, #34465 ; =0x86a1
movk w8, #1, lsl #16
mov x9, #50290 ; =0xc472
movk x9, #31834, lsl #16
movk x9, #10, lsl #32
movk x9, #16368, lsl #48
LBB0_1: ; =>This Inner Loop Header: Depth=1
fmov d1, x9
fmul d0, d0, d1
sub w8, w8, #1
cmp w8, #1
b.hi LBB0_1
; %bb.2:
sub sp, sp, #32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str d0, [sp]
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _libmin_printf
bl _libmin_success
mov w0, #0 ; =0x0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "natlog: e=%f\n"
.subsections_via_symbols
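For readability, here is a rough C sketch of what both natlog.s listings compute, inferred only from the assembly above; it is not the benchmark's actual source, and the "libmin.h" header name is an assumption. Starting from 1.0, the code multiplies by the double constant 1.0000100000000001, that is 1 + 1e-5, one hundred thousand times, approximating e as (1 + 1/n)^n with n = 100000, then prints the result.

```c
/* Hedged reconstruction of natlog.s, inferred from the assembly above.
 * "libmin.h" is an assumed header declaring libmin_printf() and
 * libmin_success(), the two routines the listings call. */
#include "libmin.h"

int main(void)
{
    double e = 1.0;
    const double step = 1.0 + 1.0e-5;   /* the 0x3ff0000a7c5ac472 literal */

    /* (1 + 1/n)^n approaches Euler's number as n grows; here n = 100000.
     * The x86 listing unrolls this loop ten times per iteration. */
    for (int i = 0; i < 100000; i++)
        e *= step;

    libmin_printf("natlog: e=%f\n", e);
    libmin_success();
    return 0;
}
```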
file: quaternions.s

x86:

.section __TEXT,__text,regular,pure_instructions
.build_version macos, 14, 0 sdk_version 15, 2
.section __TEXT,__literal8,8byte_literals
.p2align 3, 0x0 ## -- Begin function quat_from_euler
LCPI0_0:
.quad 0x3fe0000000000000 ## double 0.5
.section __TEXT,__literal16,16byte_literals
.p2align 4, 0x0
LCPI0_1:
.quad 0x8000000000000000 ## double -0
.quad 0x8000000000000000 ## double -0
.section __TEXT,__text,regular,pure_instructions
.globl _quat_from_euler
.p2align 4, 0x90
_quat_from_euler: ## @quat_from_euler
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $80, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
testq %rsi, %rsi
je LBB0_1
## %bb.3:
movq %rsi, %r14
movsd 16(%rsi), %xmm0 ## xmm0 = mem[0],zero
mulsd LCPI0_0(%rip), %xmm0
callq _libmin_cos
movapd %xmm0, -96(%rbp) ## 16-byte Spill
movsd 16(%r14), %xmm0 ## xmm0 = mem[0],zero
mulsd LCPI0_0(%rip), %xmm0
callq _libmin_sin
movapd %xmm0, -80(%rbp) ## 16-byte Spill
movsd 8(%r14), %xmm0 ## xmm0 = mem[0],zero
mulsd LCPI0_0(%rip), %xmm0
callq _libmin_cos
movapd %xmm0, -64(%rbp) ## 16-byte Spill
movsd 8(%r14), %xmm0 ## xmm0 = mem[0],zero
mulsd LCPI0_0(%rip), %xmm0
callq _libmin_sin
movapd %xmm0, -48(%rbp) ## 16-byte Spill
movsd (%r14), %xmm0 ## xmm0 = mem[0],zero
mulsd LCPI0_0(%rip), %xmm0
callq _libmin_cos
movapd %xmm0, -32(%rbp) ## 16-byte Spill
movsd LCPI0_0(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd (%r14), %xmm0
callq _libmin_sin
movapd -48(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm0, %xmm1
movapd -32(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm4
movapd LCPI0_1(%rip), %xmm2 ## xmm2 = [-0.0E+0,-0.0E+0]
movapd %xmm4, %xmm3
movapd %xmm4, %xmm6
xorpd %xmm2, %xmm3
movddup -64(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = mem[0,0]
unpcklpd %xmm0, %xmm7 ## xmm7 = xmm7[0],xmm0[0]
mulpd %xmm4, %xmm7
movapd -80(%rbp), %xmm5 ## 16-byte Reload
movddup %xmm5, %xmm0 ## xmm0 = xmm5[0,0]
movapd %xmm1, %xmm4
unpcklpd %xmm3, %xmm4 ## xmm4 = xmm4[0],xmm3[0]
mulpd %xmm0, %xmm4
movapd -96(%rbp), %xmm3 ## 16-byte Reload
movddup %xmm3, %xmm0 ## xmm0 = xmm3[0,0]
mulpd %xmm7, %xmm0
addpd %xmm4, %xmm0
movupd %xmm0, (%rbx)
xorpd %xmm2, %xmm1
movapd %xmm5, %xmm0
unpcklpd %xmm3, %xmm0 ## xmm0 = xmm0[0],xmm3[0]
unpcklpd %xmm7, %xmm6 ## xmm6 = xmm6[0],xmm7[0]
shufpd $1, %xmm1, %xmm7 ## xmm7 = xmm7[1],xmm1[0]
mulpd %xmm0, %xmm7
movapd %xmm3, %xmm0
unpcklpd %xmm5, %xmm0 ## xmm0 = xmm0[0],xmm5[0]
mulpd %xmm6, %xmm0
addpd %xmm7, %xmm0
movupd %xmm0, 16(%rbx)
jmp LBB0_2
LBB0_1:
leaq L_.str(%rip), %rdi
leaq L___func__.quat_from_euler(%rip), %rsi
xorl %eax, %eax
callq _libmin_printf
xorpd %xmm0, %xmm0
movupd %xmm0, (%rbx)
movupd %xmm0, 16(%rbx)
LBB0_2:
movq %rbx, %rax
addq $80, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal8,8byte_literals
.p2align 3, 0x0 ## -- Begin function euler_from_quat
LCPI1_0:
.quad 0x3ff0000000000000 ## double 1
.section __TEXT,__text,regular,pure_instructions
.globl _euler_from_quat
.p2align 4, 0x90
_euler_from_quat: ## @euler_from_quat
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
xorpd %xmm0, %xmm0
movupd %xmm0, (%rdi)
movq $0, 16(%rdi)
testq %rsi, %rsi
je LBB1_1
## %bb.3:
movq %rsi, %r14
movsd 8(%rsi), %xmm2 ## xmm2 = mem[0],zero
movsd 16(%rsi), %xmm1 ## xmm1 = mem[0],zero
movsd 24(%rsi), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm1, %xmm3
movsd (%rsi), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm3, %xmm0
addsd %xmm0, %xmm0
mulsd %xmm1, %xmm1
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
addsd %xmm2, %xmm2
movsd LCPI1_0(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm2, %xmm1
callq _libmin_atan2
movsd %xmm0, (%rbx)
movsd (%r14), %xmm0 ## xmm0 = mem[0],zero
movsd 8(%r14), %xmm1 ## xmm1 = mem[0],zero
mulsd 24(%r14), %xmm1
mulsd 16(%r14), %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm0
callq _libmin_asin
movsd %xmm0, 8(%rbx)
movsd 16(%r14), %xmm4 ## xmm4 = mem[0],zero
movsd 24(%r14), %xmm2 ## xmm2 = mem[0],zero
movsd 8(%r14), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm4, %xmm3
movsd (%r14), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm3, %xmm0
addsd %xmm0, %xmm0
mulsd %xmm2, %xmm2
mulsd %xmm4, %xmm4
addsd %xmm2, %xmm4
addsd %xmm4, %xmm4
movsd LCPI1_0(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm4, %xmm1
callq _libmin_atan2
movsd %xmm0, 16(%rbx)
jmp LBB1_2
LBB1_1:
leaq L_.str(%rip), %rdi
leaq L___func__.euler_from_quat(%rip), %rsi
xorl %eax, %eax
callq _libmin_printf
LBB1_2:
movq %rbx, %rax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4, 0x0 ## -- Begin function quaternion_multiply
LCPI2_0:
.quad 0x8000000000000000 ## double -0
.quad 0x8000000000000000 ## double -0
.section __TEXT,__text,regular,pure_instructions
.globl _quaternion_multiply
.p2align 4, 0x90
_quaternion_multiply: ## @quaternion_multiply
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
xorpd %xmm0, %xmm0
movupd %xmm0, 16(%rdi)
movupd %xmm0, (%rdi)
testq %rsi, %rsi
je LBB2_2
## %bb.1:
testq %rdx, %rdx
je LBB2_2
## %bb.4:
movapd LCPI2_0(%rip), %xmm3 ## xmm3 = [-0.0E+0,-0.0E+0]
movsd 8(%rsi), %xmm2 ## xmm2 = mem[0],zero
movsd 16(%rsi), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm2, %xmm1
xorpd %xmm3, %xmm1
xorpd %xmm0, %xmm3
movupd (%rdx), %xmm4
movupd 16(%rdx), %xmm6
movddup (%rsi), %xmm5 ## xmm5 = mem[0,0]
movapd %xmm5, %xmm7
movapd %xmm3, %xmm8
unpcklpd %xmm0, %xmm8 ## xmm8 = xmm8[0],xmm0[0]
mulpd %xmm6, %xmm8
mulpd %xmm6, %xmm5
shufpd $1, %xmm6, %xmm6 ## xmm6 = xmm6[1,0]
mulpd %xmm4, %xmm7
unpcklpd %xmm3, %xmm0 ## xmm0 = xmm0[0],xmm3[0]
mulpd %xmm4, %xmm0
shufpd $1, %xmm4, %xmm4 ## xmm4 = xmm4[1,0]
unpcklpd %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0]
movapd %xmm4, %xmm2
mulpd %xmm1, %xmm2
addpd %xmm2, %xmm7
addpd %xmm7, %xmm8
movddup 24(%rsi), %xmm2 ## xmm2 = mem[0,0]
mulpd %xmm2, %xmm4
mulpd %xmm6, %xmm2
subpd %xmm2, %xmm8
movupd %xmm8, (%rbx)
mulpd %xmm6, %xmm1
addpd %xmm1, %xmm5
addpd %xmm5, %xmm0
addpd %xmm0, %xmm4
movupd %xmm4, 16(%rbx)
jmp LBB2_3
LBB2_2:
leaq L_.str(%rip), %rdi
leaq L___func__.quaternion_multiply(%rip), %rsi
xorl %eax, %eax
callq _libmin_printf
LBB2_3:
movq %rbx, %rax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal8,8byte_literals
.p2align 3, 0x0 ## -- Begin function main
LCPI3_0:
.quad 0x3fefffd7c6d0df55 ## double 0.99998081999999988
LCPI3_1:
.quad 0x3ef41c9790558000 ## double 1.9180000000118547E-5
LCPI3_2:
.quad 0x3ff0000000000000 ## double 1
LCPI3_3:
.quad 0x3fe0000000000000 ## double 0.5
LCPI3_4:
.quad 0xbfe6a0902de00d1b ## double -0.70709999999999995
LCPI3_5:
.quad 0x3f847ae147ae147b ## double 0.01
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $64, %rsp
movsd LCPI3_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd LCPI3_1(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _libmin_atan2
movsd %xmm0, -24(%rbp) ## 8-byte Spill
xorps %xmm0, %xmm0
callq _libmin_asin
movsd %xmm0, -8(%rbp) ## 8-byte Spill
movsd LCPI3_2(%rip), %xmm1 ## xmm1 = mem[0],zero
xorps %xmm0, %xmm0
callq _libmin_atan2
movaps %xmm0, %xmm2
movsd %xmm0, -16(%rbp) ## 8-byte Spill
leaq L_.str.1(%rip), %rdi
movsd -8(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -24(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movb $3, %al
callq _libmin_printf
movsd -16(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI3_3(%rip), %xmm0
movsd %xmm0, -16(%rbp) ## 8-byte Spill
callq _libmin_cos
movsd %xmm0, -56(%rbp) ## 8-byte Spill
movsd -16(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _libmin_sin
movsd %xmm0, -48(%rbp) ## 8-byte Spill
movsd -8(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI3_3(%rip), %xmm0
movsd %xmm0, -8(%rbp) ## 8-byte Spill
callq _libmin_cos
movsd %xmm0, -16(%rbp) ## 8-byte Spill
movsd -8(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _libmin_sin
movsd %xmm0, -8(%rbp) ## 8-byte Spill
movsd -24(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI3_3(%rip), %xmm0
movsd %xmm0, -24(%rbp) ## 8-byte Spill
callq _libmin_cos
movsd %xmm0, -32(%rbp) ## 8-byte Spill
movsd -24(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _libmin_sin
movsd -16(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm3
movsd -32(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm3
movsd -8(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm1
mulsd %xmm0, %xmm1
movsd -48(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm2
mulsd %xmm1, %xmm2
movapd %xmm3, %xmm9
movsd -56(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm9
addsd %xmm2, %xmm9
movsd %xmm9, -40(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm4
mulsd %xmm10, %xmm6
movapd %xmm8, %xmm2
movapd %xmm8, %xmm0
mulsd %xmm6, %xmm0
mulsd %xmm8, %xmm3
mulsd %xmm4, %xmm2
mulsd %xmm7, %xmm4
subsd %xmm0, %xmm4
mulsd %xmm7, %xmm6
addsd %xmm2, %xmm6
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm3
leaq L_.str.2(%rip), %rdi
movapd %xmm9, %xmm0
movsd %xmm4, -16(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm1
movsd %xmm6, -8(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm2
movsd %xmm3, -24(%rbp) ## 8-byte Spill
movb $4, %al
callq _libmin_printf
movsd -40(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd LCPI3_4(%rip), %xmm0
callq _libmin_fabs
movsd LCPI3_5(%rip), %xmm1 ## xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
ja LBB3_2
## %bb.1:
movl $1, %edi
callq _libmin_fail
LBB3_2:
movsd -16(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd LCPI3_4(%rip), %xmm0
callq _libmin_fabs
movsd LCPI3_5(%rip), %xmm1 ## xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
ja LBB3_4
## %bb.3:
movl $1, %edi
callq _libmin_fail
LBB3_4:
movsd -8(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _libmin_fabs
movsd LCPI3_5(%rip), %xmm1 ## xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
ja LBB3_6
## %bb.5:
movl $1, %edi
callq _libmin_fail
LBB3_6:
movsd -24(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _libmin_fabs
movsd LCPI3_5(%rip), %xmm1 ## xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
ja LBB3_8
## %bb.7:
movl $1, %edi
callq _libmin_fail
LBB3_8:
leaq L_.str.3(%rip), %rdi
xorl %eax, %eax
callq _libmin_printf
callq _libmin_success
xorl %eax, %eax
addq $64, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%s: Invalid input."
L___func__.quat_from_euler: ## @__func__.quat_from_euler
.asciz "quat_from_euler"
L___func__.euler_from_quat: ## @__func__.euler_from_quat
.asciz "euler_from_quat"
L___func__.quaternion_multiply: ## @__func__.quaternion_multiply
.asciz "quaternion_multiply"
L_.str.1: ## @.str.1
.asciz "Euler: %.4lf, %.4lf, %.4lf\n"
L_.str.2: ## @.str.2
.asciz "Quaternion: %.4lf %+.4lf %+.4lf %+.4lf\n"
L_.str.3: ## @.str.3
.asciz "All tests passed!\n"
.subsections_via_symbols
arm:

.section __TEXT,__text,regular,pure_instructions
.build_version macos, 14, 0 sdk_version 15, 2
.globl _quat_from_euler ; -- Begin function quat_from_euler
.p2align 2
_quat_from_euler: ; @quat_from_euler
.cfi_startproc
; %bb.0:
sub sp, sp, #96
stp d13, d12, [sp, #16] ; 16-byte Folded Spill
stp d11, d10, [sp, #32] ; 16-byte Folded Spill
stp d9, d8, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset b8, -40
.cfi_offset b9, -48
.cfi_offset b10, -56
.cfi_offset b11, -64
.cfi_offset b12, -72
.cfi_offset b13, -80
cbz x0, LBB0_2
; %bb.1:
mov x19, x0
ldr d0, [x0, #16]
fmov d13, #0.50000000
fmul d0, d0, d13
bl _libmin_cos
fmov d8, d0
ldr d0, [x19, #16]
fmul d0, d0, d13
bl _libmin_sin
fmov d9, d0
ldr d0, [x19, #8]
fmul d0, d0, d13
bl _libmin_cos
fmov d10, d0
ldr d0, [x19, #8]
fmul d0, d0, d13
bl _libmin_sin
fmov d11, d0
ldr d0, [x19]
fmul d0, d0, d13
bl _libmin_cos
fmov d12, d0
ldr d0, [x19]
fmul d0, d0, d13
bl _libmin_sin
fmov d1, d0
fmul d3, d10, d12
fmul d4, d11, d0
fmul d0, d9, d4
fmadd d0, d3, d8, d0
fmul d2, d10, d1
fmul d5, d11, d12
fnmul d1, d5, d9
fmadd d1, d2, d8, d1
fmul d2, d9, d2
fmadd d2, d5, d8, d2
fnmul d4, d4, d8
fmadd d3, d3, d9, d4
b LBB0_3
LBB0_2:
Lloh0:
adrp x8, l___func__.quat_from_euler@PAGE
Lloh1:
add x8, x8, l___func__.quat_from_euler@PAGEOFF
str x8, [sp]
Lloh2:
adrp x0, l_.str@PAGE
Lloh3:
add x0, x0, l_.str@PAGEOFF
bl _libmin_printf
movi d0, #0000000000000000
movi d1, #0000000000000000
movi d2, #0000000000000000
movi d3, #0000000000000000
LBB0_3:
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp d9, d8, [sp, #48] ; 16-byte Folded Reload
ldp d11, d10, [sp, #32] ; 16-byte Folded Reload
ldp d13, d12, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.globl _euler_from_quat ; -- Begin function euler_from_quat
.p2align 2
_euler_from_quat: ; @euler_from_quat
.cfi_startproc
; %bb.0:
sub sp, sp, #80
stp d11, d10, [sp, #16] ; 16-byte Folded Spill
stp d9, d8, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset b8, -40
.cfi_offset b9, -48
.cfi_offset b10, -56
.cfi_offset b11, -64
cbz x0, LBB1_2
; %bb.1:
mov x19, x0
ldp d0, d1, [x0]
ldp d2, d3, [x0, #16]
fmul d3, d2, d3
fmadd d0, d0, d1, d3
fadd d0, d0, d0
fmul d2, d2, d2
fmadd d1, d1, d1, d2
fmov d10, #1.00000000
fmov d11, #-2.00000000
fmadd d1, d1, d11, d10
bl _libmin_atan2
fmov d8, d0
ldp d0, d1, [x19]
ldp d2, d3, [x19, #16]
fmul d1, d1, d3
fmadd d0, d0, d2, d1
fadd d0, d0, d0
bl _libmin_asin
fmov d9, d0
ldp d0, d1, [x19]
ldp d3, d2, [x19, #16]
fmul d1, d1, d3
fmadd d0, d0, d2, d1
fadd d0, d0, d0
fmul d1, d2, d2
fmadd d1, d3, d3, d1
fmadd d1, d1, d11, d10
bl _libmin_atan2
fmov d2, d0
b LBB1_3
LBB1_2:
Lloh4:
adrp x8, l___func__.euler_from_quat@PAGE
Lloh5:
add x8, x8, l___func__.euler_from_quat@PAGEOFF
str x8, [sp]
Lloh6:
adrp x0, l_.str@PAGE
Lloh7:
add x0, x0, l_.str@PAGEOFF
bl _libmin_printf
movi d9, #0000000000000000
movi d2, #0000000000000000
movi d8, #0000000000000000
LBB1_3:
fmov d0, d8
fmov d1, d9
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp d9, d8, [sp, #32] ; 16-byte Folded Reload
ldp d11, d10, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.cfi_endproc
; -- End function
.globl _quaternion_multiply ; -- Begin function quaternion_multiply
.p2align 2
_quaternion_multiply: ; @quaternion_multiply
.cfi_startproc
; %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
cbz x0, LBB2_3
; %bb.1:
cbz x1, LBB2_3
; %bb.2:
ldp d3, d4, [x0]
ldp d5, d6, [x1]
fnmul d0, d4, d6
fmadd d0, d3, d5, d0
ldp d7, d16, [x0, #16]
ldp d17, d18, [x1, #16]
fmsub d0, d7, d17, d0
fmsub d0, d16, d18, d0
fmul d1, d5, d4
fmadd d1, d3, d6, d1
fmadd d1, d7, d18, d1
fmsub d1, d16, d17, d1
fnmul d2, d4, d18
fmadd d2, d3, d17, d2
fmadd d2, d7, d5, d2
fmadd d2, d16, d6, d2
fmul d4, d4, d17
fmadd d3, d3, d18, d4
fmsub d3, d7, d6, d3
fmadd d3, d16, d5, d3
b LBB2_4
LBB2_3:
Lloh8:
adrp x8, l___func__.quaternion_multiply@PAGE
Lloh9:
add x8, x8, l___func__.quaternion_multiply@PAGEOFF
str x8, [sp]
Lloh10:
adrp x0, l_.str@PAGE
Lloh11:
add x0, x0, l_.str@PAGEOFF
bl _libmin_printf
movi d2, #0000000000000000
movi d3, #0000000000000000
movi d1, #0000000000000000
movi d0, #0000000000000000
LBB2_4:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpAdd Lloh8, Lloh9
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #112
stp d15, d14, [sp, #32] ; 16-byte Folded Spill
stp d13, d12, [sp, #48] ; 16-byte Folded Spill
stp d11, d10, [sp, #64] ; 16-byte Folded Spill
stp d9, d8, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset b8, -24
.cfi_offset b9, -32
.cfi_offset b10, -40
.cfi_offset b11, -48
.cfi_offset b12, -56
.cfi_offset b13, -64
.cfi_offset b14, -72
.cfi_offset b15, -80
mov x8, #57173 ; =0xdf55
movk x8, #50896, lsl #16
movk x8, #65495, lsl #32
movk x8, #16367, lsl #48
fmov d0, x8
mov x8, #32768 ; =0x8000
movk x8, #36949, lsl #16
movk x8, #7319, lsl #32
movk x8, #16116, lsl #48
fmov d1, x8
bl _libmin_atan2
fmov d8, d0
movi d0, #0000000000000000
bl _libmin_asin
fmov d9, d0
movi d0, #0000000000000000
fmov d1, #1.00000000
bl _libmin_atan2
fmov d10, d0
stp d8, d0, [sp, #8]
str d9, [sp]
Lloh12:
adrp x0, l_.str.1@PAGE
Lloh13:
add x0, x0, l_.str.1@PAGEOFF
bl _libmin_printf
fmov d13, #0.50000000
fmul d10, d10, d13
fmov d0, d10
bl _libmin_cos
fmov d11, d0
fmov d0, d10
bl _libmin_sin
fmov d10, d0
fmul d9, d9, d13
fmov d0, d9
bl _libmin_cos
fmov d12, d0
fmov d0, d9
bl _libmin_sin
fmov d9, d0
fmul d8, d8, d13
fmov d0, d8
bl _libmin_cos
fmov d13, d0
fmov d0, d8
bl _libmin_sin
fmul d1, d12, d13
fmul d2, d9, d0
fmul d3, d10, d2
fmadd d14, d1, d11, d3
fmul d0, d12, d0
fmul d3, d9, d13
fnmul d4, d3, d10
fmadd d12, d0, d11, d4
fmul d0, d10, d0
fmadd d9, d3, d11, d0
fnmul d0, d2, d11
fmadd d8, d1, d10, d0
stp d9, d8, [sp, #16]
stp d14, d12, [sp]
Lloh14:
adrp x0, l_.str.2@PAGE
Lloh15:
add x0, x0, l_.str.2@PAGEOFF
bl _libmin_printf
mov x8, #3355 ; =0xd1b
movk x8, #11744, lsl #16
movk x8, #41104, lsl #32
movk x8, #49126, lsl #48
fmov d0, x8
fadd d0, d14, d0
bl _libmin_fabs
mov x8, #5243 ; =0x147b
movk x8, #18350, lsl #16
movk x8, #31457, lsl #32
movk x8, #16260, lsl #48
fmov d1, x8
fcmp d0, d1
b.mi LBB3_2
; %bb.1:
mov w0, #1 ; =0x1
bl _libmin_fail
LBB3_2:
mov x8, #3355 ; =0xd1b
movk x8, #11744, lsl #16
movk x8, #41104, lsl #32
movk x8, #49126, lsl #48
fmov d0, x8
fadd d0, d12, d0
bl _libmin_fabs
mov x8, #5243 ; =0x147b
movk x8, #18350, lsl #16
movk x8, #31457, lsl #32
movk x8, #16260, lsl #48
fmov d1, x8
fcmp d0, d1
b.mi LBB3_4
; %bb.3:
mov w0, #1 ; =0x1
bl _libmin_fail
LBB3_4:
fmov d0, d9
bl _libmin_fabs
mov x8, #5243 ; =0x147b
movk x8, #18350, lsl #16
movk x8, #31457, lsl #32
movk x8, #16260, lsl #48
fmov d1, x8
fcmp d0, d1
b.mi LBB3_6
; %bb.5:
mov w0, #1 ; =0x1
bl _libmin_fail
LBB3_6:
fmov d0, d8
bl _libmin_fabs
mov x8, #5243 ; =0x147b
movk x8, #18350, lsl #16
movk x8, #31457, lsl #32
movk x8, #16260, lsl #48
fmov d1, x8
fcmp d0, d1
b.mi LBB3_8
; %bb.7:
mov w0, #1 ; =0x1
bl _libmin_fail
LBB3_8:
Lloh16:
adrp x0, l_.str.3@PAGE
Lloh17:
add x0, x0, l_.str.3@PAGEOFF
bl _libmin_printf
bl _libmin_success
mov w0, #0 ; =0x0
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp d9, d8, [sp, #80] ; 16-byte Folded Reload
ldp d11, d10, [sp, #64] ; 16-byte Folded Reload
ldp d13, d12, [sp, #48] ; 16-byte Folded Reload
ldp d15, d14, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh14, Lloh15
.loh AdrpAdd Lloh12, Lloh13
.loh AdrpAdd Lloh16, Lloh17
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%s: Invalid input."
l___func__.quat_from_euler: ; @__func__.quat_from_euler
.asciz "quat_from_euler"
l___func__.euler_from_quat: ; @__func__.euler_from_quat
.asciz "euler_from_quat"
l___func__.quaternion_multiply: ; @__func__.quaternion_multiply
.asciz "quaternion_multiply"
l_.str.1: ; @.str.1
.asciz "Euler: %.4lf, %.4lf, %.4lf\n"
l_.str.2: ; @.str.2
.asciz "Quaternion: %.4lf %+.4lf %+.4lf %+.4lf\n"
l_.str.3: ; @.str.3
.asciz "All tests passed!\n"
.subsections_via_symbols
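Similarly, the following is a hedged C sketch of two routines from the quaternions.s listings, quat_from_euler and quaternion_multiply, reconstructed from the assembly rather than taken from the real source. The struct layouts (scalar part w at offset 0, Euler angles as roll, pitch, yaw) and the "libmin.h" header are assumptions; the NULL checks and the "%s: Invalid input." message match the error paths visible above. Both functions return a 4-double struct by value, which is why the x86 version writes through a hidden result pointer while the AArch64 version returns in d0 through d3.

```c
/* Hedged reconstruction inferred from the quaternions.s listings above;
 * struct field names and the "libmin.h" header are assumptions.
 * "libmin.h" is assumed to declare libmin_cos, libmin_sin, libmin_printf. */
#include "libmin.h"

typedef struct { double w, x, y, z; } quaternion;        /* scalar part at offset 0 */
typedef struct { double roll, pitch, yaw; } euler_angles; /* doubles at offsets 0, 8, 16 */

/* Roll/pitch/yaw to quaternion, matching the half-angle sin/cos
 * products visible in the assembly. */
quaternion quat_from_euler(const euler_angles *e)
{
    quaternion q = {0.0, 0.0, 0.0, 0.0};
    if (!e) {
        libmin_printf("%s: Invalid input.", "quat_from_euler");
        return q;
    }
    double cr = libmin_cos(e->roll * 0.5),  sr = libmin_sin(e->roll * 0.5);
    double cp = libmin_cos(e->pitch * 0.5), sp = libmin_sin(e->pitch * 0.5);
    double cy = libmin_cos(e->yaw * 0.5),   sy = libmin_sin(e->yaw * 0.5);
    q.w = cr * cp * cy + sr * sp * sy;
    q.x = sr * cp * cy - cr * sp * sy;
    q.y = cr * sp * cy + sr * cp * sy;
    q.z = cr * cp * sy - sr * sp * cy;
    return q;
}

/* Hamilton product q = a * b, the formula the fmadd/fmsub chains implement. */
quaternion quaternion_multiply(const quaternion *a, const quaternion *b)
{
    quaternion q = {0.0, 0.0, 0.0, 0.0};
    if (!a || !b) {
        libmin_printf("%s: Invalid input.", "quaternion_multiply");
        return q;
    }
    q.w = a->w * b->w - a->x * b->x - a->y * b->y - a->z * b->z;
    q.x = a->w * b->x + a->x * b->w + a->y * b->z - a->z * b->y;
    q.y = a->w * b->y - a->x * b->z + a->y * b->w + a->z * b->x;
    q.z = a->w * b->z + a->x * b->y - a->y * b->x + a->z * b->w;
    return q;
}
```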
The remaining rows are shown truncated in the preview:
anagram.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
parrondo.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
c-interp.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
pascal.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
mandelbrot.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
fuzzy-match.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
quine.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
mersenne.s | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) | "\t.section\t__TEXT,__text,regular,pure_instructions\n\t.build_version macos, 14, 0\tsdk_version 15,(...TRUNCATED) |
End of preview.