; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512bw | FileCheck %s
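; Each 512-bit vector addition below is stored through %B with !nontemporal
; metadata at 64-byte alignment, so with AVX-512 it should lower to a
; non-temporal move (vmovntps, vmovntpd, or vmovntdq). The scalar loads from
; %loadptr interleaved between the stores are summed into the return value.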
define i32 @f(<16 x float> %A, <16 x float> %AA, i8* %B, <8 x double> %C, <8 x double> %CC, <8 x i64> %E, <8 x i64> %EE, <16 x i32> %F, <16 x i32> %FF, <32 x i16> %G, <32 x i16> %GG, <64 x i8> %H, <64 x i8> %HH, i32 * %loadptr) {
; CHECK-LABEL: f:
; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $64, %rsp
; CHECK-NEXT: vmovdqa64 144(%rbp), %zmm8
; CHECK-NEXT: vmovdqa64 16(%rbp), %zmm9
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vmovntps %zmm0, (%rdi)
; CHECK-NEXT: vpaddq %zmm5, %zmm4, %zmm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %zmm0, (%rdi)
; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntpd %zmm0, (%rdi)
; CHECK-NEXT: vpaddd %zmm7, %zmm6, %zmm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %zmm0, (%rdi)
; CHECK-NEXT: vpaddw 80(%rbp), %zmm9, %zmm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %zmm0, (%rdi)
; CHECK-NEXT: vpaddb 208(%rbp), %zmm8, %zmm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %zmm0, (%rdi)
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%v0 = load i32, i32* %loadptr, align 1
%cast = bitcast i8* %B to <16 x float>*
%A2 = fadd <16 x float> %A, %AA
store <16 x float> %A2, <16 x float>* %cast, align 64, !nontemporal !0
%v1 = load i32, i32* %loadptr, align 1
%cast1 = bitcast i8* %B to <8 x i64>*
%E2 = add <8 x i64> %E, %EE
store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
%v2 = load i32, i32* %loadptr, align 1
%cast2 = bitcast i8* %B to <8 x double>*
%C2 = fadd <8 x double> %C, %CC
store <8 x double> %C2, <8 x double>* %cast2, align 64, !nontemporal !0
%v3 = load i32, i32* %loadptr, align 1
%cast3 = bitcast i8* %B to <16 x i32>*
%F2 = add <16 x i32> %F, %FF
store <16 x i32> %F2, <16 x i32>* %cast3, align 64, !nontemporal !0
%v4 = load i32, i32* %loadptr, align 1
%cast4 = bitcast i8* %B to <32 x i16>*
%G2 = add <32 x i16> %G, %GG
store <32 x i16> %G2, <32 x i16>* %cast4, align 64, !nontemporal !0
%v5 = load i32, i32* %loadptr, align 1
%cast5 = bitcast i8* %B to <64 x i8>*
%H2 = add <64 x i8> %H, %HH
store <64 x i8> %H2, <64 x i8>* %cast5, align 64, !nontemporal !0
%v6 = load i32, i32* %loadptr, align 1
%sum1 = add i32 %v0, %v1
%sum2 = add i32 %sum1, %v2
%sum3 = add i32 %sum2, %v3
%sum4 = add i32 %sum3, %v4
%sum5 = add i32 %sum4, %v5
%sum6 = add i32 %sum5, %v6
ret i32 %sum6
}
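; !nontemporal metadata node: the single i32 1 operand marks the tagged
; stores above as non-temporal.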
!0 = !{i32 1}