263   unsigned Opc = LdSt.getOpcode();
265   if (isDS(LdSt)) {
267       getNamedOperand(LdSt, AMDGPU::OpName::offset);
270       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
286       getNamedOperand(LdSt, AMDGPU::OpName::offset0);
288       getNamedOperand(LdSt, AMDGPU::OpName::offset1);
298       if (LdSt.mayLoad())
299         EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
301         assert(LdSt.mayStore());
303         EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
309       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
319   if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
320       const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
324       const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
328       const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
330         = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
335       getNamedOperand(LdSt, AMDGPU::OpName::offset);
341       const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
346       getNamedOperand(LdSt, AMDGPU::OpName::offset);
358   if (isSMRD(LdSt)) {
360       getNamedOperand(LdSt, AMDGPU::OpName::offset);
364       const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
372   if (isFLAT(LdSt)) {
373       const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
376       if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
382       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
385       Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
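Taken together, these fragments trace a common pattern in the AMDGPU backend's SIInstrInfo code: dispatch on the load/store flavor (DS, MUBUF/MTBUF, SMRD, FLAT) and use getNamedOperand with the generated AMDGPU::OpName values to pull out the base operand and the immediate offset, since the operand carrying the address base has a different name in each encoding. The sketch below is a simplified, hypothetical helper (getBaseAndOffsetSketch is not a real LLVM function) illustrating that pattern; it assumes it is compiled inside the AMDGPU backend where SIInstrInfo.h and the AMDGPU::OpName enum are visible, and it deliberately omits the offset0/offset1 split-DS case, the soffset/srsrc buffer operands, and the element-width computation that the excerpted lines also touch.

```cpp
// Hypothetical sketch, not the actual SIInstrInfo entry point.
// Assumes it lives in a file under llvm/lib/Target/AMDGPU/ so the
// backend-local headers and generated AMDGPU::OpName enum resolve.
#include "SIInstrInfo.h"

using namespace llvm;

static bool getBaseAndOffsetSketch(const SIInstrInfo &TII,
                                   const MachineInstr &LdSt,
                                   const MachineOperand *&BaseOp,
                                   int64_t &Offset) {
  if (SIInstrInfo::isDS(LdSt)) {
    // DS (LDS/GDS) access: single 'offset' immediate plus an 'addr' VGPR
    // base. The two-offset (offset0/offset1) form is not handled here.
    const MachineOperand *OffsetImm =
        TII.getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = TII.getNamedOperand(LdSt, AMDGPU::OpName::addr);
    if (!BaseOp || !OffsetImm)
      return false;
    Offset = OffsetImm->getImm();
    return true;
  }

  if (SIInstrInfo::isMUBUF(LdSt) || SIInstrInfo::isMTBUF(LdSt)) {
    // Buffer access: treat 'vaddr' as the base and read the immediate
    // 'offset'; 'soffset' and 'srsrc' are ignored in this sketch.
    const MachineOperand *OffsetImm =
        TII.getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = TII.getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!BaseOp || !OffsetImm)
      return false;
    Offset = OffsetImm->getImm();
    return true;
  }

  if (SIInstrInfo::isSMRD(LdSt)) {
    // Scalar memory access: the base is the 'sbase' SGPR operand and the
    // displacement is the 'offset' immediate.
    const MachineOperand *OffsetImm =
        TII.getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = TII.getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp || !OffsetImm)
      return false;
    Offset = OffsetImm->getImm();
    return true;
  }

  if (SIInstrInfo::isFLAT(LdSt)) {
    // FLAT/GLOBAL/SCRATCH access: prefer the scalar 'saddr' base when the
    // instruction has one, otherwise fall back to the vector 'vaddr' base.
    BaseOp = TII.getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (!BaseOp)
      BaseOp = TII.getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    const MachineOperand *OffsetImm =
        TII.getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!BaseOp || !OffsetImm)
      return false;
    Offset = OffsetImm->getImm();
    return true;
  }

  // Not a memory-access form this sketch recognizes.
  return false;
}
```

The per-class dispatch mirrors the excerpt: named-operand lookup is the only portable way to find the base and offset here, because the same logical operand (the address base) is called addr, vaddr, sbase, or saddr depending on the instruction encoding.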