| # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
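# These tests exercise inlining of @llvm.memset by the pre-legalizer combiner:
# a memset whose length is only known at runtime is left as an intrinsic call,
# while small constant-length memsets are expanded into wide G_STORE sequences.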
--- |
  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
  target triple = "aarch64"

  define void @test_ms1(i8* nocapture %dst, i32 %c, i32 %len) local_unnamed_addr #0 {
  entry:
    %0 = trunc i32 %c to i8
    %conv = zext i32 %len to i64
    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %0, i64 %conv, i1 false)
    ret void
  }

  declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1

  define void @test_ms2_const(i8* nocapture %dst, i32 %c) local_unnamed_addr #0 {
  entry:
    %0 = trunc i32 %c to i8
    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %0, i64 16, i1 false)
    ret void
  }

  define void @test_ms3_const_both(i8* nocapture %dst) local_unnamed_addr #0 {
  entry:
    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 64, i64 16, i1 false)
    ret void
  }

  define void @test_ms4_const_both_unaligned(i8* nocapture %dst) local_unnamed_addr #0 {
  entry:
    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 64, i64 18, i1 false)
    ret void
  }

  declare void @llvm.stackprotector(i8*, i8**) #2

  attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2,+zcm,+zcz" "unsafe-fp-math"="false" "use-soft-float"="false" }
  attributes #1 = { argmemonly nounwind }
  attributes #2 = { nounwind }

...
---
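# The length of this memset is only known at runtime, so it cannot be expanded
# into stores; the G_INTRINSIC_W_SIDE_EFFECTS call is left untouched.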
name: test_ms1
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w1, $w2, $x0

    ; CHECK-LABEL: name: test_ms1
    ; CHECK: liveins: $w1, $w2, $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store 1 into %ir.dst)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %2:_(s32) = COPY $w2
    %3:_(s8) = G_TRUNC %1(s32)
    %4:_(s64) = G_ZEXT %2(s32)
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
...
---
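# A 16-byte memset of a runtime byte value is expanded into two 8-byte stores.
# The byte is splatted across a 64-bit register by multiplying its
# zero-extension with 0x0101010101010101 (72340172838076673).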
name: test_ms2_const
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w1, $x0

    ; CHECK-LABEL: name: test_ms2_const
    ; CHECK: liveins: $w1, $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
    ; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[MUL]](s64), [[GEP]](p0) :: (store 8 into %ir.dst + 8, align 1)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %3:_(s64) = G_CONSTANT i64 16
    %2:_(s8) = G_TRUNC %1(s32)
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
...
---
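# With both the value (64) and the length (16) constant, no multiply is needed:
# the splatted pattern 0x4040404040404040 (4629771061636907072) is materialized
# directly and stored twice.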
name: test_ms3_const_both
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $x0

    ; CHECK-LABEL: name: test_ms3_const_both
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
    ; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[C]](s64), [[GEP]](p0) :: (store 8 into %ir.dst + 8, align 1)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s8) = G_CONSTANT i8 64
    %2:_(s64) = G_CONSTANT i64 16
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
...
---
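# An 18-byte memset is not a multiple of the 8-byte store size: after two
# 8-byte stores, the remaining 2 bytes are written by a 2-byte store of the
# truncated splat at offset 16.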
name: test_ms4_const_both_unaligned
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $x0

    ; CHECK-LABEL: name: test_ms4_const_both_unaligned
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
    ; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[C]](s64), [[GEP]](p0) :: (store 8 into %ir.dst + 8, align 1)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[TRUNC]](s16), [[GEP1]](p0) :: (store 2 into %ir.dst + 16, align 1)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s8) = G_CONSTANT i8 64
    %2:_(s64) = G_CONSTANT i64 18
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
...