; Test 32-bit floating-point strict subtraction.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s

declare float @foo()
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)

; Check register subtraction.
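; SEBR is the register-to-register form of the short BFP subtract.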
define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: sebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %res
}

; Check the low end of the SEB range.
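; SEB encodes a 12-bit unsigned displacement, so 0 is the smallest offset it
; can address directly.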
define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
%f2 = load float, float *%ptr
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %res
}

; Check the high end of the aligned SEB range.
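; 4092 (1023 * 4) is the largest 4-byte-aligned displacement that fits in the
; 12-bit field.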
define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: seb %f0, 4092(%r2)
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float, float *%ptr
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %res
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
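; A displacement of 4096 no longer fits in 12 bits, so the base register has
; to be adjusted first.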
define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float, float *%ptr
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %res
}

; Check negative displacements, which also need separate address logic.
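; The displacement field is unsigned, so -4 cannot be encoded and the base
; must again be adjusted.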
define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float, float *%ptr
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %res
}

; Check that SEB allows indices.
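; SEB is an RXE-format instruction with base, index, and displacement
; operands; the SLLG scales the index by 4, the size of a float.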
define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: seb %f0, 400(%r1,%r2)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%base, i64 %index
%ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float, float *%ptr2
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %res
}

; Check that subtractions of spilled values can use SEB rather than SEBR.
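; The call to @foo clobbers the floating-point registers, so the loaded values
; must be spilled across it; folding the reload into SEB avoids a separate
; load. The spill slots start at 160(%r15), just above the 160-byte register
; save area of the s390x ELF ABI.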
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: seb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
%ptr3 = getelementptr float, float *%ptr0, i64 6
%ptr4 = getelementptr float, float *%ptr0, i64 8
%ptr5 = getelementptr float, float *%ptr0, i64 10
%ptr6 = getelementptr float, float *%ptr0, i64 12
%ptr7 = getelementptr float, float *%ptr0, i64 14
%ptr8 = getelementptr float, float *%ptr0, i64 16
%ptr9 = getelementptr float, float *%ptr0, i64 18
%ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float, float *%ptr0
%val1 = load float, float *%ptr1
%val2 = load float, float *%ptr2
%val3 = load float, float *%ptr3
%val4 = load float, float *%ptr4
%val5 = load float, float *%ptr5
%val6 = load float, float *%ptr6
%val7 = load float, float *%ptr7
%val8 = load float, float *%ptr8
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
%ret = call float @foo() #0
%sub0 = call float @llvm.experimental.constrained.fsub.f32(
float %ret, float %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub1 = call float @llvm.experimental.constrained.fsub.f32(
float %sub0, float %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub2 = call float @llvm.experimental.constrained.fsub.f32(
float %sub1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub3 = call float @llvm.experimental.constrained.fsub.f32(
float %sub2, float %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub4 = call float @llvm.experimental.constrained.fsub.f32(
float %sub3, float %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub5 = call float @llvm.experimental.constrained.fsub.f32(
float %sub4, float %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub6 = call float @llvm.experimental.constrained.fsub.f32(
float %sub5, float %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub7 = call float @llvm.experimental.constrained.fsub.f32(
float %sub6, float %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub8 = call float @llvm.experimental.constrained.fsub.f32(
float %sub7, float %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub9 = call float @llvm.experimental.constrained.fsub.f32(
float %sub8, float %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%sub10 = call float @llvm.experimental.constrained.fsub.f32(
float %sub9, float %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %sub10
}

attributes #0 = { strictfp }