@@ -486,6 +486,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
   OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
   OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
   void cvtFlat(MCInst &Inst, const OperandVector &Operands);
+  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
 
   void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
   OperandMatchResultTy parseOffset(OperandVector &Operands);
@@ -672,31 +673,8 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     SMLoc ErrorLoc = IDLoc;
     if (ErrorInfo != ~0ULL) {
       if (ErrorInfo >= Operands.size()) {
-        if (isForcedVOP3()) {
-          // If 64-bit encoding has been forced we can end up with no
-          // clamp or omod operands if none of the registers have modifiers,
-          // so we need to add these to the operand list.
-          AMDGPUOperand &LastOp =
-              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
-          if (LastOp.isRegKind() ||
-             (LastOp.isImm() &&
-              LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
-            SMLoc S = Parser.getTok().getLoc();
-            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                                AMDGPUOperand::ImmTyClamp));
-            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                                AMDGPUOperand::ImmTyOMod));
-            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
-                                               Out, ErrorInfo,
-                                               MatchingInlineAsm);
-            if (!Res)
-              return Res;
-          }
-
-        }
         return Error(IDLoc, "too few operands for instruction");
       }
-
       ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
       if (ErrorLoc == SMLoc())
         ErrorLoc = IDLoc;
@@ -1261,13 +1239,6 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
     }
   }
 
-  // Once we reach end of statement, continue parsing so we can add default
-  // values for optional arguments.
-  AMDGPUAsmParser::OperandMatchResultTy Res;
-  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
-    if (Res != MatchOperand_Success)
-      return Error(getLexer().getLoc(), "failed parsing operand.");
-  }
   return false;
 }
 
@@ -1356,6 +1327,18 @@ AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
   return MatchOperand_Success;
 }
 
+typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
+
+void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
+  auto i = OptionalIdx.find(ImmT);
+  if (i != OptionalIdx.end()) {
+    unsigned Idx = i->second;
+    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
+  } else {
+    Inst.addOperand(MCOperand::createImm(0));
+  }
+}
+
 static bool operandsHasOptionalOp(const OperandVector &Operands,
                                   const OptionalOperand &OOp) {
   for (unsigned i = 0; i < Operands.size(); i++) {
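
The new addOptionalImmOperand helper centralizes the lookup-or-default pattern that every cvt* converter below now relies on: if the parser recorded an operand index for a given optional immediate, that operand is appended to the MCInst; otherwise a default immediate of 0 is appended in its place. A minimal stand-alone model of that behavior, using plain std::map and int in place of the real MCInst/AMDGPUOperand types (names here are illustrative, not LLVM API):

    #include <cstdio>
    #include <map>
    #include <vector>

    enum ImmTy { ImmTyOffset, ImmTyGLC, ImmTySLC, ImmTyTFE };
    typedef std::map<ImmTy, unsigned> OptionalImmIndexMap;

    // Lookup-or-default: append the operand the parser saw, or a default 0.
    static void addOptionalImm(std::vector<int> &Inst,
                               const std::vector<int> &Operands,
                               const OptionalImmIndexMap &OptionalIdx,
                               ImmTy T) {
      OptionalImmIndexMap::const_iterator I = OptionalIdx.find(T);
      Inst.push_back(I != OptionalIdx.end() ? Operands[I->second] : 0);
    }

    int main() {
      // The user wrote only "offset:16 glc"; slc and tfe were omitted.
      std::vector<int> Operands = {16, 1};
      OptionalImmIndexMap Idx = {{ImmTyOffset, 0}, {ImmTyGLC, 1}};
      std::vector<int> Inst;
      for (ImmTy T : {ImmTyOffset, ImmTyGLC, ImmTySLC, ImmTyTFE})
        addOptionalImm(Inst, Operands, Idx, T);
      for (int V : Inst)
        std::printf("%d ", V); // prints: 16 1 0 0
      std::printf("\n");
      return 0;
    }
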
@@ -1392,11 +1375,15 @@ AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
     if (Res != MatchOperand_Success)
       return Res;
 
+    bool DefaultValue = (Value == Op.Default);
+
     if (Op.ConvertResult && !Op.ConvertResult(Value)) {
       return MatchOperand_ParseFail;
     }
 
-    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
+    if (!DefaultValue) {
+      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
+    }
     return MatchOperand_Success;
   }
   return MatchOperand_NoMatch;
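
Filtering out default-valued operands at parse time is the other half of the scheme: parseOptionalOps now drops any optional whose parsed value equals Op.Default, and the converters re-materialize those defaults through addOptionalImmOperand. This is also why the isForcedVOP3 retry in MatchAndEmitInstruction and the end-of-statement default-parsing loop in ParseInstruction could be deleted above: the matcher no longer requires every optional operand to be present in the operand list. A toy sketch of the parse-side filter (hypothetical names, mirroring the hunk):

    #include <cstdio>
    #include <vector>

    struct OptionalOperand {
      long Default; // value implied when the user omits the operand
    };

    // An optional operand parsed to its default value is not pushed at
    // all; the cvt* conversion step re-inserts the default later.
    static void pushIfNotDefault(std::vector<long> &Operands,
                                 const OptionalOperand &Op, long Value) {
      bool DefaultValue = (Value == Op.Default);
      if (!DefaultValue)
        Operands.push_back(Value);
    }

    int main() {
      OptionalOperand Offset = {0};
      std::vector<long> Operands;
      pushIfNotDefault(Operands, Offset, 0);  // "offset:0"  -> dropped
      pushIfNotDefault(Operands, Offset, 16); // "offset:16" -> kept
      std::printf("%zu operands kept\n", Operands.size()); // 1 operands kept
      return 0;
    }
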
@@ -1450,7 +1437,7 @@ bool AMDGPUOperand::isDSOffset01() const {
 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                     const OperandVector &Operands) {
 
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+  OptionalImmIndexMap OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1465,13 +1452,10 @@ void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
-  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
-  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
 
-  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
-  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
-  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
 
@@ -1498,12 +1482,11 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
-  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
 
   if (!GDSOnly) {
-    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
-    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
+    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
   }
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
@@ -1642,7 +1625,7 @@ AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
 
 void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                               const OperandVector &Operands) {
-  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+  OptionalImmIndexMap OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1653,27 +1636,39 @@ void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
       continue;
     }
 
-    // Handle 'glc' token which is sometimes hard-coded into the
-    // asm string. There are no MCInst operands for these.
-    if (Op.isToken())
-      continue;
-
-    // Handle optional arguments
     OptionalIdx[Op.getImmTy()] = i;
-
   }
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+}
 
-  // flat atomic instructions don't have a glc argument.
-  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
-    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
-    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
-  }
 
-  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
-  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
+                                    const OperandVector &Operands) {
+  OptionalImmIndexMap OptionalIdx;
 
-  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+  bool token = false;
+  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+
+    // Add the register arguments
+    if (Op.isReg()) {
+      Op.addRegOperands(Inst, 1);
+      continue;
+    }
+
+    // Handle 'glc' token for flat atomics.
+    if (Op.isToken()) {
+      token = true;
+      continue;
+    }
+
+    // Handle optional arguments
+    OptionalIdx[Op.getImmTy()] = token ? i - 1 : i;
+  }
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
 }
 
 //===----------------------------------------------------------------------===//
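
Splitting cvtFlatAtomic out of cvtFlat reflects the removed comment: for returning flat atomics, 'glc' is hard-coded into the asm string as a token with no MCInst operand, so the atomic converter appends only the slc and tfe defaults. The `token ? i - 1 : i` adjustment literally records subsequent optional-immediate indices shifted down by one once a token has been seen, presumably to account for the token's slot in the operand list. A rough stand-alone model of the two operand orders (assumed layout; in the real parser the order comes from the tablegen'ed instruction definitions):

    #include <cstdio>
    #include <map>
    #include <vector>

    enum ImmTy { ImmTyGLC, ImmTySLC, ImmTyTFE };
    typedef std::map<ImmTy, unsigned> OptionalImmIndexMap;

    static void addOptionalImm(std::vector<int> &Inst,
                               const std::vector<int> &Operands,
                               const OptionalImmIndexMap &Idx, ImmTy T) {
      OptionalImmIndexMap::const_iterator I = Idx.find(T);
      Inst.push_back(I != Idx.end() ? Operands[I->second] : 0);
    }

    // Non-atomic flat ops own a glc immediate; returning atomics spell glc
    // as a literal token, so only slc/tfe are immediates to fill in.
    static void finishFlat(std::vector<int> &Inst,
                           const std::vector<int> &Ops,
                           const OptionalImmIndexMap &Idx, bool IsAtomic) {
      if (!IsAtomic)
        addOptionalImm(Inst, Ops, Idx, ImmTyGLC);
      addOptionalImm(Inst, Ops, Idx, ImmTySLC);
      addOptionalImm(Inst, Ops, Idx, ImmTyTFE);
    }

    int main() {
      std::vector<int> Ops = {1};               // user wrote "slc" only
      OptionalImmIndexMap Idx = {{ImmTySLC, 0}};
      std::vector<int> Flat, Atomic;
      finishFlat(Flat, Ops, Idx, false);  // glc slc tfe -> 0 1 0
      finishFlat(Atomic, Ops, Idx, true); // slc tfe     -> 1 0
      std::printf("flat: %zu imms, atomic: %zu imms\n", Flat.size(),
                  Atomic.size()); // flat: 3 imms, atomic: 2 imms
      return 0;
    }
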
@@ -1718,7 +1713,7 @@ bool AMDGPUOperand::isMubufOffset() const {
 
 void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                                const OperandVector &Operands) {
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+  OptionalImmIndexMap OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1746,17 +1741,10 @@ void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  assert(OptionalIdx.size() == 4);
-
-  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
-  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
-  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
-  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
-
-  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1890,7 +1878,8 @@ void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
 }
 
 void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
-  if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
+  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
+  if (TSFlags & SIInstrFlags::VOP3) {
     cvtVOP3(Inst, Operands);
   } else {
     cvtId(Inst, Operands);
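
The last hunk replaces a parse-state heuristic with a property of the opcode the matcher actually selected: the instruction's TSFlags record whether a VOP3 (64-bit) encoding was chosen, so the converter no longer needs to guess from operand modifiers or from a forced-encoding suffix. A minimal sketch of the flag test; the bit position below is made up for illustration, while the real SIInstrFlags::VOP3 constant lives in the AMDGPU backend's SIDefines.h:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical flag bit for illustration only; not the real value.
    namespace SIInstrFlags {
    const uint64_t VOP3 = UINT64_C(1) << 10;
    }

    // Once an opcode is selected, its TSFlags say definitively whether the
    // VOP3 encoding is in use; no need to re-derive that from the parsed
    // operands.
    static bool isVOP3Encoded(uint64_t TSFlags) {
      return (TSFlags & SIInstrFlags::VOP3) != 0;
    }

    int main() {
      std::printf("%d %d\n", isVOP3Encoded(SIInstrFlags::VOP3),
                  isVOP3Encoded(0)); // prints: 1 0
      return 0;
    }
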