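; Test AES fusion in the AArch64 machine scheduler: each aese/aesd should be
; kept adjacent to the aesmc/aesimc that consumes its result, so that cores
; which fuse the pair (+fuse-aes, also implied by the CPUs below) can execute
; them back to back.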
; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m2 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m3 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m4 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m5 | FileCheck %s

declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
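
; Four independent streams of four AES encryption rounds: each round applies
; aese with the round key followed by aesmc on its result, giving the
; scheduler a fusable pair per round and stream.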
define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
%d0 = load <16 x i8>, <16 x i8>* %a0
%a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
%d1 = load <16 x i8>, <16 x i8>* %a1
%a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
%d2 = load <16 x i8>, <16 x i8>* %a2
%a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
%d3 = load <16 x i8>, <16 x i8>* %a3
%k0 = load <16 x i8>, <16 x i8>* %b0
%e00 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d0, <16 x i8> %k0)
%f00 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
%e01 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d1, <16 x i8> %k0)
%f01 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e01)
%e02 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d2, <16 x i8> %k0)
%f02 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
%e03 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d3, <16 x i8> %k0)
%f03 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
%b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
%k1 = load <16 x i8>, <16 x i8>* %b1
%e10 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f00, <16 x i8> %k1)
%f10 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e10)
%e11 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f01, <16 x i8> %k1)
%f11 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e11)
%e12 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f02, <16 x i8> %k1)
%f12 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e12)
%e13 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f03, <16 x i8> %k1)
%f13 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e13)
%b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
%k2 = load <16 x i8>, <16 x i8>* %b2
%e20 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f10, <16 x i8> %k2)
%f20 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e20)
%e21 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f11, <16 x i8> %k2)
%f21 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e21)
%e22 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f12, <16 x i8> %k2)
%f22 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e22)
%e23 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f13, <16 x i8> %k2)
%f23 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e23)
%b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
%k3 = load <16 x i8>, <16 x i8>* %b3
%e30 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f20, <16 x i8> %k3)
%f30 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e30)
%e31 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f21, <16 x i8> %k3)
%f31 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e31)
%e32 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f22, <16 x i8> %k3)
%f32 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e32)
%e33 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f23, <16 x i8> %k3)
%f33 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e33)
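; Last round: aese with no trailing aesmc; the result is XORed with %e.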
%g0 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f30, <16 x i8> %d)
%h0 = xor <16 x i8> %g0, %e
%g1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f31, <16 x i8> %d)
%h1 = xor <16 x i8> %g1, %e
%g2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f32, <16 x i8> %d)
%h2 = xor <16 x i8> %g2, %e
%g3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f33, <16 x i8> %d)
%h3 = xor <16 x i8> %g3, %e
store <16 x i8> %h0, <16 x i8>* %c0
%c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
store <16 x i8> %h1, <16 x i8>* %c1
%c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
store <16 x i8> %h2, <16 x i8>* %c2
%c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
store <16 x i8> %h3, <16 x i8>* %c3
ret void
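; Each checked aese must be followed immediately by the aesmc that reads and
; rewrites the same register.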
; CHECK-LABEL: aesea:
; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VA]], [[VA]]
; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VB]], [[VB]]
; CHECK: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VC]], [[VC]]
; CHECK: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VD]], [[VD]]
; CHECK: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VE]], [[VE]]
; CHECK: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VF]], [[VF]]
; CHECK: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VG]], [[VG]]
; CHECK: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VH]], [[VH]]
; CHECK-NOT: aesmc
}
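
; The decryption counterpart of @aesea: aesd paired with aesimc.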
define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
%d0 = load <16 x i8>, <16 x i8>* %a0
%a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
%d1 = load <16 x i8>, <16 x i8>* %a1
%a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
%d2 = load <16 x i8>, <16 x i8>* %a2
%a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
%d3 = load <16 x i8>, <16 x i8>* %a3
%k0 = load <16 x i8>, <16 x i8>* %b0
%e00 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d0, <16 x i8> %k0)
%f00 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
%e01 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d1, <16 x i8> %k0)
%f01 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e01)
%e02 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d2, <16 x i8> %k0)
%f02 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
%e03 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d3, <16 x i8> %k0)
%f03 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
%b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
%k1 = load <16 x i8>, <16 x i8>* %b1
%e10 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f00, <16 x i8> %k1)
%f10 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e10)
%e11 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f01, <16 x i8> %k1)
%f11 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e11)
%e12 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f02, <16 x i8> %k1)
%f12 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e12)
%e13 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f03, <16 x i8> %k1)
%f13 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e13)
%b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
%k2 = load <16 x i8>, <16 x i8>* %b2
%e20 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f10, <16 x i8> %k2)
%f20 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e20)
%e21 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f11, <16 x i8> %k2)
%f21 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e21)
%e22 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f12, <16 x i8> %k2)
%f22 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e22)
%e23 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f13, <16 x i8> %k2)
%f23 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e23)
%b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
%k3 = load <16 x i8>, <16 x i8>* %b3
%e30 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f20, <16 x i8> %k3)
%f30 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e30)
%e31 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f21, <16 x i8> %k3)
%f31 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e31)
%e32 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f22, <16 x i8> %k3)
%f32 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e32)
%e33 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f23, <16 x i8> %k3)
%f33 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e33)
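; Last round: aesd with no trailing aesimc; the result is XORed with %e.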
%g0 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f30, <16 x i8> %d)
%h0 = xor <16 x i8> %g0, %e
%g1 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f31, <16 x i8> %d)
%h1 = xor <16 x i8> %g1, %e
%g2 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f32, <16 x i8> %d)
%h2 = xor <16 x i8> %g2, %e
%g3 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f33, <16 x i8> %d)
%h3 = xor <16 x i8> %g3, %e
store <16 x i8> %h0, <16 x i8>* %c0
%c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
store <16 x i8> %h1, <16 x i8>* %c1
%c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
store <16 x i8> %h2, <16 x i8>* %c2
%c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
store <16 x i8> %h3, <16 x i8>* %c3
ret void
; CHECK-LABEL: aesda:
; CHECK: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VA]], [[VA]]
; CHECK: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VB]], [[VB]]
; CHECK: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VC]], [[VC]]
; CHECK: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VD]], [[VD]]
; CHECK: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VE]], [[VE]]
; CHECK: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VF]], [[VF]]
; CHECK: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VG]], [[VG]]
; CHECK: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VH]], [[VH]]
; CHECK-NOT: aesimc
}
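
; Loads and stores are mixed in with the AES instructions so the scheduler
; has independent memory operations available to place between a fused pair.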
define void @aes_load_store(<16 x i8>* %p1, <16 x i8>* %p2, <16 x i8>* %p3) {
entry:
%x1 = alloca <16 x i8>, align 16
%x2 = alloca <16 x i8>, align 16
%x3 = alloca <16 x i8>, align 16
%x4 = alloca <16 x i8>, align 16
%x5 = alloca <16 x i8>, align 16
%in1 = load <16 x i8>, <16 x i8>* %p1, align 16
store <16 x i8> %in1, <16 x i8>* %x1, align 16
%aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
%in2 = load <16 x i8>, <16 x i8>* %p2, align 16
%aesmc1 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
%aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
%in3 = load <16 x i8>, <16 x i8>* %p3, align 16
%aesmc2 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
%aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3) #2
store <16 x i8> %aese3, <16 x i8>* %x5, align 16
ret void
; CHECK-LABEL: aes_load_store:
; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; aese and aesmc are described as sharing an execution unit, so they are not
; scheduled in the same cycle and the scheduler may place another instruction
; between them; hence CHECK rather than CHECK-NEXT for this first pair.
; CHECK: aesmc [[VA]], [[VA]]
; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VB]], [[VB]]
; CHECK-NOT: aesmc
}