//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file models the dispatch component of an instruction pipeline.
///
/// The DispatchStage is responsible for updating instruction dependencies
/// and communicating to the simulated instruction scheduler that an instruction
/// is ready to be scheduled for execution.
///
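/// As a rough orientation (not part of this file's logic): a DispatchStage is
/// normally created together with a RetireControlUnit and a RegisterFile and
/// appended to a Pipeline; see mca::Context::createDefaultPipeline for the
/// real wiring used by llvm-mca. A minimal sketch, assuming the scheduling
/// model SM, subtarget/register info STI/MRI, and a Pipeline P are already
/// available:
///
/// \code
///   RetireControlUnit RCU(SM);
///   RegisterFile PRF(SM, MRI, /*NumRegs=*/0);
///   P.appendStage(std::make_unique<DispatchStage>(
///       STI, MRI, /*MaxDispatchWidth=*/0, RCU, PRF));
/// \endcode
///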
//===----------------------------------------------------------------------===//
#include "llvm/MCA/Stages/DispatchStage.h"
#include "llvm/MCA/HWEventListener.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "llvm-mca"
namespace llvm {
namespace mca {
DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
const MCRegisterInfo &MRI,
unsigned MaxDispatchWidth, RetireControlUnit &R,
RegisterFile &F)
: DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {
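// A DispatchWidth of zero means: use the issue width declared by the
// subtarget's scheduling model.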
if (!DispatchWidth)
DispatchWidth = Subtarget.getSchedModel().IssueWidth;
}

void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
ArrayRef<unsigned> UsedRegs,
unsigned UOps) const {
LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
notifyEvent<HWInstructionEvent>(
HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
}
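
// Check register file availability: return true if physical registers can be
// allocated for all the register writes of this instruction. On failure,
// notify a RegisterFileStall event to the listeners and return false.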
bool DispatchStage::checkPRF(const InstRef &IR) const {
SmallVector<MCPhysReg, 4> RegDefs;
for (const WriteState &RegDef : IR.getInstruction()->getDefs())
RegDefs.emplace_back(RegDef.getRegisterID());
const unsigned RegisterMask = PRF.isAvailable(RegDefs);
// An all-zero mask means that every register file has enough physical
// registers available.
if (RegisterMask) {
notifyEvent<HWStallEvent>(
HWStallEvent(HWStallEvent::RegisterFileStall, IR));
return false;
}
return true;
}
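
// Check the retire control unit (the simulated reorder buffer): return true
// if it has enough free entries to track all the micro opcodes of this
// instruction; otherwise notify a RetireControlUnitStall event and return
// false.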
bool DispatchStage::checkRCU(const InstRef &IR) const {
const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();
if (RCU.isAvailable(NumMicroOps))
return true;
notifyEvent<HWStallEvent>(
HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
return false;
}
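
// Return true if IR can be dispatched during this cycle. All three checks are
// evaluated unconditionally (no short-circuiting), so every stall reason gets
// reported to the event listeners, not just the first one.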
bool DispatchStage::canDispatch(const InstRef &IR) const {
bool CanDispatch = checkRCU(IR);
CanDispatch &= checkPRF(IR);
CanDispatch &= checkNextStage(IR);
return CanDispatch;
}
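
// Allocate the hardware resources needed by IR (reorder buffer entries and
// physical registers for its register writes), then move it to the next
// stage. Instructions with more micro opcodes than the dispatch width consume
// the whole dispatch group now; the remaining opcodes are carried over to the
// next cycle(s) (see cycleStart()).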
Error DispatchStage::dispatch(InstRef IR) {
assert(!CarryOver && "Cannot dispatch another instruction!");
Instruction &IS = *IR.getInstruction();
const InstrDesc &Desc = IS.getDesc();
const unsigned NumMicroOps = IS.getNumMicroOps();
if (NumMicroOps > DispatchWidth) {
assert(AvailableEntries == DispatchWidth && "Dispatch group is not empty!");
AvailableEntries = 0;
CarryOver = NumMicroOps - DispatchWidth;
CarriedOver = IR;
} else {
assert(AvailableEntries >= NumMicroOps && "Not enough dispatch group entries!");
AvailableEntries -= NumMicroOps;
}
// Check if this instruction ends the dispatch group.
if (Desc.EndGroup)
AvailableEntries = 0;
// Check if this is an optimizable reg-reg move.
if (IS.isOptimizableMove()) {
assert(IS.getDefs().size() == 1 && "Expected a single output!");
assert(IS.getUses().size() == 1 && "Expected a single input!");
if (PRF.tryEliminateMove(IS.getDefs()[0], IS.getUses()[0]))
IS.setEliminated();
}
// A dependency-breaking instruction doesn't have to wait on its register
// input operands, and it is often optimized out at the register renaming
// stage. Only update RAW dependencies if this instruction is not dependency
// breaking. A dependency-breaking instruction is a zero-latency instruction
// that doesn't consume hardware resources; an example on X86 is a zero-idiom
// XOR.
//
// We also don't update data dependencies for instructions that have been
// eliminated at the register renaming stage.
if (!IS.isEliminated()) {
for (ReadState &RS : IS.getUses())
PRF.addRegisterRead(RS, STI);
}
// By default, a dependency-breaking zero-idiom is expected to be optimized
// out at the register renaming stage. That means no physical register is
// allocated to the instruction.
SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
for (WriteState &WS : IS.getDefs())
PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);
// Reserve entries in the reorder buffer.
unsigned RCUTokenID = RCU.dispatch(IR);
// Notify the instruction that it has been dispatched.
IS.dispatch(RCUTokenID);
// Notify listeners of the "instruction dispatched" event,
// and move IR to the next stage.
notifyInstructionDispatched(IR, RegisterFiles,
std::min(DispatchWidth, NumMicroOps));
return moveToTheNextStage(IR);
}
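
// At the start of every cycle, replenish the available dispatch group entries
// and, if a wide instruction was only partially dispatched, keep dispatching
// its carried-over micro opcodes before anything else.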
Error DispatchStage::cycleStart() {
PRF.cycleStart();
if (!CarryOver) {
AvailableEntries = DispatchWidth;
return ErrorSuccess();
}
AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
CarryOver -= DispatchedOpcodes;
assert(CarriedOver && "Invalid dispatched instruction");
SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
if (!CarryOver)
CarriedOver = InstRef();
return ErrorSuccess();
}
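
// Return true if the dispatch group still has room for IR this cycle and the
// instruction can be moved to the next stage right away.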
bool DispatchStage::isAvailable(const InstRef &IR) const {
const Instruction &Inst = *IR.getInstruction();
unsigned NumMicroOps = Inst.getNumMicroOps();
const InstrDesc &Desc = Inst.getDesc();
unsigned Required = std::min(NumMicroOps, DispatchWidth);
if (Required > AvailableEntries)
return false;
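// Instructions that begin a new dispatch group can only be dispatched when
// the current group is still empty.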
if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
return false;
// The dispatch logic doesn't internally buffer instructions. It only accepts
// instructions that can be successfully moved to the next stage during this
// same cycle.
return canDispatch(IR);
}

Error DispatchStage::execute(InstRef &IR) {
assert(canDispatch(IR) && "Cannot dispatch this instruction!");
return dispatch(IR);
}

#ifndef NDEBUG
void DispatchStage::dump() const {
PRF.dump();
RCU.dump();
}
#endif
} // namespace mca
} // namespace llvm