# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - | FileCheck %s
# Check we don't try to combine a load of < s8, as that would end up creating an illegal non-extending load.
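#
# Rationale (my reading of the combine): folding the G_ZEXT into the G_LOAD
# would produce a G_ZEXTLOAD whose s8 result is the same width as its 1-byte
# memory operand, i.e. a non-extending load, which is not a valid extending
# load. The CHECK lines below assert that the G_LOAD/G_ZEXT pair survives the
# combiner unchanged.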
--- |
define i8 @test(i1* %ptr) {
ret i8 undef
}
...
---
name: test
alignment: 4
tracksRegLiveness: true
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
liveins: $x0
; CHECK-LABEL: name: test
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.ptr)
; CHECK: [[ZEXT:%[0-9]+]]:_(s8) = G_ZEXT [[LOAD]](s1)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(s1) = G_LOAD %0(p0) :: (load 1 from %ir.ptr)
%2:_(s8) = G_ZEXT %1(s1)
%3:_(s32) = G_ANYEXT %2(s8)
$w0 = COPY %3(s32)
RET_ReallyLR implicit $w0
...