Skip to content
This repository was archived by the owner on Sep 2, 2018. It is now read-only.

Commit 8d04517

Browse files
Nikolay Haustov
Nikolay Haustov
authored and
Nikolay Haustov
committed
[AMDGPU] Assembler: Simplify handling of optional operands
Prepare to support DPP encodings. For DPP encodings, we want row_mask/bank_mask/bound_ctrl to be optional operands. However this means that when parsing instruction which has no mnemonic prefix, we cannot add both default values for VOP3 and for DPP optional operands to OperandVector - neither instructions would match. So add default values for optional operands to MCInst during conversion instead. Mark more operands as IsOptional = 1 in .td files. Do not add default values for optional operands to OperandVector in AMDGPUAsmParser. Add default values for optional operands during conversion using new helper addOptionalImmOperand. Change to cvtVOP3_2_mod to check instruction flag instead of presence of modifiers. In the future, cvtVOP3* functions can be combined into one. Separate cvtFlat and cvtFlatAtomic. Fix CNDMASK_B32 definition to have no modifiers. Review: http://reviews.llvm.org/D17445 Reviewers: tstellarAMD git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@261742 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent ba9f09c commit 8d04517

File tree

3 files changed

+79
-75
lines changed

3 files changed

+79
-75
lines changed

lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp

Lines changed: 61 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -486,6 +486,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
486486
OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
487487
OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
488488
void cvtFlat(MCInst &Inst, const OperandVector &Operands);
489+
void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
489490

490491
void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
491492
OperandMatchResultTy parseOffset(OperandVector &Operands);
@@ -672,31 +673,8 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
672673
SMLoc ErrorLoc = IDLoc;
673674
if (ErrorInfo != ~0ULL) {
674675
if (ErrorInfo >= Operands.size()) {
675-
if (isForcedVOP3()) {
676-
// If 64-bit encoding has been forced we can end up with no
677-
// clamp or omod operands if none of the registers have modifiers,
678-
// so we need to add these to the operand list.
679-
AMDGPUOperand &LastOp =
680-
((AMDGPUOperand &)*Operands[Operands.size() - 1]);
681-
if (LastOp.isRegKind() ||
682-
(LastOp.isImm() &&
683-
LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
684-
SMLoc S = Parser.getTok().getLoc();
685-
Operands.push_back(AMDGPUOperand::CreateImm(0, S,
686-
AMDGPUOperand::ImmTyClamp));
687-
Operands.push_back(AMDGPUOperand::CreateImm(0, S,
688-
AMDGPUOperand::ImmTyOMod));
689-
bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
690-
Out, ErrorInfo,
691-
MatchingInlineAsm);
692-
if (!Res)
693-
return Res;
694-
}
695-
696-
}
697676
return Error(IDLoc, "too few operands for instruction");
698677
}
699-
700678
ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
701679
if (ErrorLoc == SMLoc())
702680
ErrorLoc = IDLoc;
@@ -1261,13 +1239,6 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
12611239
}
12621240
}
12631241

1264-
// Once we reach end of statement, continue parsing so we can add default
1265-
// values for optional arguments.
1266-
AMDGPUAsmParser::OperandMatchResultTy Res;
1267-
while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
1268-
if (Res != MatchOperand_Success)
1269-
return Error(getLexer().getLoc(), "failed parsing operand.");
1270-
}
12711242
return false;
12721243
}
12731244

@@ -1356,6 +1327,18 @@ AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
13561327
return MatchOperand_Success;
13571328
}
13581329

1330+
typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1331+
1332+
void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
1333+
auto i = OptionalIdx.find(ImmT);
1334+
if (i != OptionalIdx.end()) {
1335+
unsigned Idx = i->second;
1336+
((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1337+
} else {
1338+
Inst.addOperand(MCOperand::createImm(0));
1339+
}
1340+
}
1341+
13591342
static bool operandsHasOptionalOp(const OperandVector &Operands,
13601343
const OptionalOperand &OOp) {
13611344
for (unsigned i = 0; i < Operands.size(); i++) {
@@ -1392,11 +1375,15 @@ AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
13921375
if (Res != MatchOperand_Success)
13931376
return Res;
13941377

1378+
bool DefaultValue = (Value == Op.Default);
1379+
13951380
if (Op.ConvertResult && !Op.ConvertResult(Value)) {
13961381
return MatchOperand_ParseFail;
13971382
}
13981383

1399-
Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
1384+
if (!DefaultValue) {
1385+
Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
1386+
}
14001387
return MatchOperand_Success;
14011388
}
14021389
return MatchOperand_NoMatch;
@@ -1450,7 +1437,7 @@ bool AMDGPUOperand::isDSOffset01() const {
14501437
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
14511438
const OperandVector &Operands) {
14521439

1453-
std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1440+
OptionalImmIndexMap OptionalIdx;
14541441

14551442
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
14561443
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1465,13 +1452,10 @@ void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
14651452
OptionalIdx[Op.getImmTy()] = i;
14661453
}
14671454

1468-
unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
1469-
unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
1470-
unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
1455+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
1456+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
1457+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
14711458

1472-
((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
1473-
((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
1474-
((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
14751459
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
14761460
}
14771461

@@ -1498,12 +1482,11 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
14981482
OptionalIdx[Op.getImmTy()] = i;
14991483
}
15001484

1501-
unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
1502-
((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
1485+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1486+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
15031487

15041488
if (!GDSOnly) {
1505-
unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
1506-
((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
1489+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
15071490
}
15081491
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
15091492
}
@@ -1642,7 +1625,7 @@ AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
16421625

16431626
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
16441627
const OperandVector &Operands) {
1645-
std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1628+
OptionalImmIndexMap OptionalIdx;
16461629

16471630
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
16481631
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1653,27 +1636,39 @@ void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
16531636
continue;
16541637
}
16551638

1656-
// Handle 'glc' token which is sometimes hard-coded into the
1657-
// asm string. There are no MCInst operands for these.
1658-
if (Op.isToken())
1659-
continue;
1660-
1661-
// Handle optional arguments
16621639
OptionalIdx[Op.getImmTy()] = i;
1663-
16641640
}
1641+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1642+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1643+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1644+
}
16651645

1666-
// flat atomic instructions don't have a glc argument.
1667-
if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
1668-
unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
1669-
((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
1670-
}
16711646

1672-
unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
1673-
unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
1647+
void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
1648+
const OperandVector &Operands) {
1649+
OptionalImmIndexMap OptionalIdx;
16741650

1675-
((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
1676-
((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
1651+
bool token = false;
1652+
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1653+
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1654+
1655+
// Add the register arguments
1656+
if (Op.isReg()) {
1657+
Op.addRegOperands(Inst, 1);
1658+
continue;
1659+
}
1660+
1661+
// Handle 'glc' token for flat atomics.
1662+
if (Op.isToken()) {
1663+
token = true;
1664+
continue;
1665+
}
1666+
1667+
// Handle optional arguments
1668+
OptionalIdx[Op.getImmTy()] = token ? i - 1 : i;
1669+
}
1670+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1671+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
16771672
}
16781673

16791674
//===----------------------------------------------------------------------===//
@@ -1718,7 +1713,7 @@ bool AMDGPUOperand::isMubufOffset() const {
17181713

17191714
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
17201715
const OperandVector &Operands) {
1721-
std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1716+
OptionalImmIndexMap OptionalIdx;
17221717

17231718
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
17241719
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1746,17 +1741,10 @@ void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
17461741
OptionalIdx[Op.getImmTy()] = i;
17471742
}
17481743

1749-
assert(OptionalIdx.size() == 4);
1750-
1751-
unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
1752-
unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
1753-
unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
1754-
unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
1755-
1756-
((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
1757-
((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
1758-
((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
1759-
((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
1744+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1745+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1746+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1747+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
17601748
}
17611749

17621750
//===----------------------------------------------------------------------===//
@@ -1890,7 +1878,8 @@ void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
18901878
}
18911879

18921880
void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
1893-
if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
1881+
uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1882+
if (TSFlags & SIInstrFlags::VOP3) {
18941883
cvtVOP3(Inst, Operands);
18951884
} else {
18961885
cvtId(Inst, Operands);

lib/Target/AMDGPU/SIInstrInfo.td

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -435,13 +435,15 @@ def MubufOffsetMatchClass : AsmOperandClass {
435435
let Name = "MubufOffset";
436436
let ParserMethod = "parseMubufOptionalOps";
437437
let RenderMethod = "addImmOperands";
438+
let IsOptional = 1;
438439
}
439440

440441
class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
441442
let Name = "DSOffset"#parser;
442443
let ParserMethod = parser;
443444
let RenderMethod = "addImmOperands";
444445
let PredicateMethod = "isDSOffset";
446+
let IsOptional = 1;
445447
}
446448

447449
def DSOffsetMatchClass : DSOffsetBaseMatchClass <"parseDSOptionalOps">;
@@ -452,13 +454,15 @@ def DSOffset01MatchClass : AsmOperandClass {
452454
let ParserMethod = "parseDSOff01OptionalOps";
453455
let RenderMethod = "addImmOperands";
454456
let PredicateMethod = "isDSOffset01";
457+
let IsOptional = 1;
455458
}
456459

457460
class GDSBaseMatchClass <string parser> : AsmOperandClass {
458461
let Name = "GDS"#parser;
459462
let PredicateMethod = "isImm";
460463
let ParserMethod = parser;
461464
let RenderMethod = "addImmOperands";
465+
let IsOptional = 1;
462466
}
463467

464468
def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
@@ -469,6 +473,7 @@ class GLCBaseMatchClass <string parser> : AsmOperandClass {
469473
let PredicateMethod = "isImm";
470474
let ParserMethod = parser;
471475
let RenderMethod = "addImmOperands";
476+
let IsOptional = 1;
472477
}
473478

474479
def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -479,6 +484,7 @@ class SLCBaseMatchClass <string parser> : AsmOperandClass {
479484
let PredicateMethod = "isImm";
480485
let ParserMethod = parser;
481486
let RenderMethod = "addImmOperands";
487+
let IsOptional = 1;
482488
}
483489

484490
def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -490,6 +496,7 @@ class TFEBaseMatchClass <string parser> : AsmOperandClass {
490496
let PredicateMethod = "isImm";
491497
let ParserMethod = parser;
492498
let RenderMethod = "addImmOperands";
499+
let IsOptional = 1;
493500
}
494501

495502
def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
@@ -523,13 +530,21 @@ def SMRDLiteralOffsetMatchClass : SMRDOffsetBaseMatchClass <
523530
"isSMRDLiteralOffset"
524531
>;
525532

533+
class OptionalImmAsmOperand <string OpName> : AsmOperandClass {
534+
let Name = "Imm"#OpName;
535+
let PredicateMethod = "isImm";
536+
let IsOptional = 1;
537+
}
538+
526539
let OperandType = "OPERAND_IMMEDIATE" in {
527540

528541
def offen : Operand<i1> {
529542
let PrintMethod = "printOffen";
543+
let ParserMatchClass = OptionalImmAsmOperand<"offen">;
530544
}
531545
def idxen : Operand<i1> {
532546
let PrintMethod = "printIdxen";
547+
let ParserMatchClass = OptionalImmAsmOperand<"idxen">;
533548
}
534549
def addr64 : Operand<i1> {
535550
let PrintMethod = "printAddr64";
@@ -2871,7 +2886,7 @@ multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
28712886
dag outs_noret = (outs),
28722887
string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> {
28732888

2874-
let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in {
2889+
let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in {
28752890
def "" : FLAT_Pseudo <NAME, outs_noret,
28762891
(ins VReg_64:$addr, data_rc:$data,
28772892
slc_flat_atomic:$slc, tfe_flat_atomic:$tfe), []>,
@@ -2888,7 +2903,7 @@ multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
28882903
asm_noret>;
28892904
}
28902905

2891-
let glc = 1, hasPostISelHook = 1 in {
2906+
let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in {
28922907
defm _RTN : FLAT_AtomicRet_m <op, (outs vdst_rc:$vdst),
28932908
(ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
28942909
tfe_flat_atomic:$tfe),

lib/Target/AMDGPU/SIInstructions.td

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1433,7 +1433,7 @@ multiclass V_CNDMASK <vop2 op, string name> {
14331433

14341434
defm _e64 : VOP3_m <
14351435
op, VOP_CNDMASK.Outs, VOP_CNDMASK.Ins64,
1436-
name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3>;
1436+
name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3, 0>;
14371437
}
14381438

14391439
defm V_CNDMASK_B32 : V_CNDMASK<vop2<0x0>, "v_cndmask_b32">;

0 commit comments

Comments
 (0)