Difference between revisions of "NaplesPUInstrFormats.td"

From NaplesPU Documentation
Jump to: navigation, search
 
(6 intermediate revisions by 3 users not shown)
Line 1: Line 1:
 
[[Category:Tablegen Files]]
 
[[Category:Tablegen Files]]
NuPlusInstrFormats.td and [[NuPlusInstrInfo.td]] describe the nu+ instructions and the patterns to transform LLVM IR into machine code. The NuPlusInstrFormats.td contains the classes that describe the [[ISA|nu+ instruction formats]], support classes that facilitate the definition of instructions and also the definition of nodes which make the pattern recognition easier.
+
The NaplesPUInstrFormats.td contains the classes that describe the [[ISA|NaplesPU instruction formats]], support classes to facilitate the definition of instructions and also the definition of nodes which make the pattern recognition easier.
  
The files "compiler/include/llvm/Target/Target.td" and "compiler/include/llvm/Target/TargetSelectionDAG.td" contain the Tablegen classes used for the description.
+
<syntaxhighlight>
 +
//===-- NaplesPUInstrFormats.td - NaplesPU Instruction Formats ---*- tablegen -*-===//
 +
//
 +
//                     The LLVM Compiler Infrastructure
 +
//
 +
// This file is distributed under the University of Illinois Open Source
 +
// License. See LICENSE.TXT for details.
 +
//
 +
//===----------------------------------------------------------------------===//
  
== Instruction Formats ==
+
//===----------------------------------------------------------------------===//
 +
// Instruction Pattern Stuff
 +
//===----------------------------------------------------------------------===//
  
An instruction is specified in TableGen by the class ''Instruction'' (compiler/include/llvm/Target/Target.td), which contains, among others, the following fields:
+
def simm16  : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
 +
def simm9  : PatLeaf<(imm), [{ return isInt<9>(N->getSExtValue()); }]>;
  
* '''Output operands''' (dag OutOperandList;), this contains the output value(s) defined by the instruction as a result of its computation;
+
// Addressing modes as in SPARC
* '''Input operands''' (dag InOperandList;), this holds all the input value(s) used by the instruction as its input operands;
+
def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
* '''Assembly string''' (string AsmString = "";), this stores the string that is recognized by the ''assembler'' or that is printed by the ''disassembler'';
+
def V16ADDRri : ComplexPattern<v16i32, 2, "SelectADDRri", [], []>;
* '''DAG pattern''' (list<dag> Pattern;), this is the DAG pattern of machine-independent ''SelectionDAG'' nodes that is matched by the instruction selector to produce an instance of the corresponding target-specific instruction.
+
def V8ADDRri : ComplexPattern<v8i64, 2, "SelectADDRri", [], []>;
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// NaplesPU profiles and nodes
 +
//===----------------------------------------------------------------------===//
 +
// Transformation nodes
 +
def LO32I : SDNodeXForm<imm, [{
 +
  return CurDAG->getTargetConstant((unsigned)N->getAPIntValue().getLoBits(32).getZExtValue(), SDLoc(N), MVT::i32);}]>;
 +
 
 +
def HI32I : SDNodeXForm<imm, [{
 +
  // Transformation function: shift the immediate value down into the low bits.
 +
  return CurDAG->getTargetConstant((unsigned)N->getAPIntValue().getHiBits(32).getZExtValue(), SDLoc(N),  MVT::i32);}]>;
 +
 
 +
def LO32F : SDNodeXForm<fpimm, [{
 +
  return CurDAG->getTargetConstant((unsigned)(N->getValueAPF().bitcastToAPInt().getLoBits(32).getZExtValue()), SDLoc(N), MVT::i32);}]>;
 +
 
 +
def HI32F : SDNodeXForm<fpimm, [{
 +
  // Transformation function: shift the immediate value down into the low bits.
 +
  return CurDAG->getTargetConstant((unsigned)(N->getValueAPF().bitcastToAPInt().getHiBits(32).getZExtValue()), SDLoc(N),  MVT::i32);}]>;
 +
 
 +
def DIV2 : SDNodeXForm<imm, [{
 +
  return CurDAG->getTargetConstant((unsigned)N->getZExtValue() / 2, SDLoc(N), MVT::i32);}]>;
 +
 
 +
// Moveil/moveih nodes definition, used for globaladdress lowering
 +
def leah  : SDNode<"NaplesPUISD::LEAH", SDTypeProfile<1, 1, []>>;
 +
def leal  : SDNode<"NaplesPUISD::LEAL", SDTypeProfile<1, 2, []>>;
 +
 
 +
// A splat is a vector with the same value in all lanes. Used to handle operation
 +
// with both vector and scalar operands.
 +
def splat : SDNode<"NaplesPUISD::SPLAT", SDTypeProfile<1, 1, [SDTCisEltOfVec<1, 0>]>>;
 +
 
 +
def return : SDNode<"NaplesPUISD::RET_FLAG", SDTypeProfile<0, 0, []>,
 +
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
 +
 
 +
def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
 +
 
 +
def call : SDNode<"NaplesPUISD::CALL", SDT_SPCall,
 +
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;
 +
 
 +
//To mark the beginning and end of a call sequence
 +
def SDT_SPCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
 +
def SDT_SPCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
 +
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
 +
                            [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
 +
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
 +
                            [SDNPHasChain, SDNPSideEffect,
 +
                            SDNPOptInGlue, SDNPOutGlue]>;
 +
//To handle the lack of conditional moves
 +
def selcondresult : SDNode<"NaplesPUISD::SEL_COND_RESULT", SDTypeProfile<1, 3,
 +
[SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>>;
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Operand Definitions
 +
//===----------------------------------------------------------------------===//
 +
 
 +
// Used for the LEA_Sym, to detect the lea pseudo instruction
 +
def symref : Operand<OtherVT> {}
 +
 
 +
def SIMM16OP : Operand<i32> {
 +
  let DecoderMethod = "decodeSimm16Value";
 +
}
 +
 
 +
def SIMM9OP : Operand<i32> {
 +
  let DecoderMethod = "decodeSimm9Value";
 +
}
 +
 
 +
def MemAsmOperand : AsmOperandClass {
 +
  let Name = "Mem";
 +
  let ParserMethod = "ParseMemoryOperand";
 +
}
 +
 
 +
def MEMri : Operand<iPTR> {
 +
  let PrintMethod = "printMemOperand";
 +
  let EncoderMethod = "encodeMemoryOpValue";
 +
  let DecoderMethod = "decodeScalarMemoryOpValue";
 +
  let ParserMatchClass = MemAsmOperand;
 +
  let MIOperandInfo = (ops GPR32, i32imm);
 +
}
 +
 
 +
def V16MEMri : Operand<v16i32> {
 +
  let PrintMethod = "printMemOperand";
 +
  let EncoderMethod = "encodeMemoryOpValue";
 +
  let DecoderMethod = "decodeVectorWMemoryOpValue";
 +
  let ParserMatchClass = MemAsmOperand;
 +
  let MIOperandInfo = (ops VR512W, i32imm);
 +
}
 +
 
 +
def LEAri : Operand<iPTR> {
 +
  let PrintMethod = "printMemOperand";
 +
  let EncoderMethod = "encodeLEAValue";
 +
  let ParserMatchClass = MemAsmOperand; //TODO: controllare se è corretto il ParserMatchClass
 +
  let MIOperandInfo = (ops GPR32, i32imm);
 +
}
 +
 
 +
def ABSh : Operand<iPTR> {
 +
  let PrintMethod = "printMemOperand";
 +
  let EncoderMethod = "encodeABShValue";
 +
  let ParserMatchClass = MemAsmOperand; //TODO: controllare se è corretto il ParserMatchClass
 +
  let MIOperandInfo = (ops i32imm);
 +
}
 +
 
 +
def ABSl : Operand<iPTR> {
 +
  let PrintMethod = "printMemOperand";
 +
  let EncoderMethod = "encodeABSlValue";
 +
  let ParserMatchClass = MemAsmOperand; //TODO: controllare se è corretto il ParserMatchClass
 +
  let MIOperandInfo = (ops i32imm);
 +
}
 +
 
 +
 
 +
def brtarget : Operand<OtherVT>
 +
{
 +
  let EncoderMethod = "encodeBranchTargetOpValue";
 +
  let DecoderMethod = "decodeBranchTargetOpValue";
 +
}
 +
 
 +
def calltarget : Operand<iPTR>
 +
{
 +
  let EncoderMethod = "encodeBranchTargetOpValue";
 +
  let DecoderMethod = "decodeBranchTargetOpValue";
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Pattern fragments
 +
//===----------------------------------------------------------------------===//
 +
// Definition of anyextload used in the loading of vector types < 512 bits
 +
def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr),
 +
    [{ return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;}]>;
 +
 
 +
//----------------------------------------------------------------------------//
 +
//------------------------------ LOAD AND STORE ------------------------------//
 +
//----------------------------------------------------------------------------//
 +
 
 +
def MemStore : PatFrag<(ops node:$val, node:$ptr), (store node:$val, node:$ptr), [{
 +
                            if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
                              return !cast<StoreSDNode>(N)->isTruncatingStore();
 +
                            else
 +
                              return false;}]>;
 +
 
 +
def MemLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
 +
          else
 +
            return false;}]>;
 +
 
 +
def ScratchpadStore : PatFrag<(ops node:$val, node:$ptr), (store node:$val, node:$ptr), [{
 +
                            if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
                              return !cast<StoreSDNode>(N)->isTruncatingStore();
 +
                            else
 +
                              return false;}]>;
 +
 
 +
def ScratchpadLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
 +
          else
 +
            return false;}]>;
 +
 
 +
//---------------- EXTLOAD scalar ----------------//
 +
def extloadi1_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
def extloadi1_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
 
 +
def extloadi8_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
def extloadi8_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
 
 +
def extloadi16_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
def extloadi16_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
 
 +
def extloadi32_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
def extloadi32_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
 
 +
//---------------- ZEXTLOAD scalar ----------------//
 +
def zextloadi1_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
def zextloadi1_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
 
 +
def zextloadi8_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
def zextloadi8_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
 
 +
def zextloadi16_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
def zextloadi16_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
 
 +
def zextloadi32_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
def zextloadi32_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
 
 +
//---------------- ZEXTLOAD vector ----------------//
 +
def zextloadv16i8_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
      else
 +
        return false; }]>;
 +
def zextloadv16i8_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
      else
 +
        return false; }]>;
 +
 
 +
def zextloadv16i16_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
      else
 +
        return false; }]>;
 +
def zextloadv16i16_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
      else
 +
        return false; }]>;
 +
 
 +
def zextloadv8i8_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
      else
 +
        return false; }]>;
 +
def zextloadv8i8_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
      else
 +
        return false; }]>;
 +
 
 +
def zextloadv8i16_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
 +
      else
 +
        return false; }]>;
 +
def zextloadv8i16_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
 +
      else
 +
        return false; }]>;
 +
 
 +
def zextloadv8i32_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
      else
 +
        return false; }]>;
 +
def zextloadv8i32_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
      else
 +
        return false; }]>;                 
 +
 
 +
 
 +
//---------------- SEXTLOAD scalar ----------------//
 +
def sextloadi1_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
def sextloadi1_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
 
 +
def sextloadi8_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
def sextloadi8_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
 
 +
def sextloadi16_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
def sextloadi16_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
 
 +
def sextloadi32_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
def sextloadi32_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 +
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
 +
//---------------- SEXTLOAD vector ----------------//
 +
def sextloadv16i8_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
      else
 +
        return false; }]>;
 +
def sextloadv16i8_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
      else
 +
        return false; }]>;
 +
 
 +
def sextloadv16i16_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
      else
 +
        return false; }]>;
 +
def sextloadv16i16_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
      else
 +
        return false; }]>;
 +
 
 +
def sextloadv8i8_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
      else
 +
        return false; }]>;
 +
def sextloadv8i8_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
      else
 +
        return false; }]>;
 +
 
 +
def sextloadv8i16_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
 +
      else
 +
        return false; }]>;
 +
def sextloadv8i16_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
 +
      else
 +
        return false; }]>;
 +
 
 +
def sextloadv8i32_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
      else
 +
        return false; }]>;
 +
def sextloadv8i32_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
      else
 +
        return false; }]>;                                   
 +
 
 +
 +
//---------------- ANYEXTLOAD vector ----------------//
 +
def anyextloadv16i8_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
      else
 +
        return false; }]>;
 +
def anyextloadv16i8_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
      else
 +
        return false; }]>;
 +
 
 +
def anyextloadv16i16_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
      else
 +
        return false; }]>;
 +
def anyextloadv16i16_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
      else
 +
        return false; }]>;
 +
 
 +
def anyextloadv8i8_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
      else
 +
        return false; }]>;
 +
def anyextloadv8i8_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
      else
 +
        return false; }]>;       
 +
 
 +
def anyextloadv8i16_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
 +
      else
 +
        return false; }]>;
 +
def anyextloadv8i16_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
 +
      else
 +
        return false; }]>;
 +
 
 +
def anyextloadv8i32_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
      else
 +
        return false; }]>;
 +
def anyextloadv8i32_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
 +
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
 +
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
      else
 +
        return false; }]>;
 +
 
 +
 +
//---------------- TRUNCSTORE scalar ----------------//
 +
def truncstorei1_mem : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
def truncstorei1_scratch : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
 +
          else
 +
            return false;}]>;
 +
 
 +
def truncstorei8_mem : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
def truncstorei8_scratch : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 +
          else
 +
            return false;}]>;
 +
 
 +
def truncstorei16_mem : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
def truncstorei16_scratch : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 +
          else
 +
            return false;}]>;
 +
 
 +
def truncstorei32_mem : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
def truncstorei32_scratch : PatFrag<(ops node:$val, node:$ptr),
 +
                          (truncstore node:$val, node:$ptr), [{
 +
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 +
          else
 +
            return false;}]>;
 +
 
 +
//---------------- TRUNCSTORE vector ----------------//
 +
def truncstorev16i8_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
          else
 +
            return false; }]>;
 +
def truncstorev16i8_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v16i8;
 +
          else
 +
            return false; }]>; 
 +
 
 +
def truncstorev16i16_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
          else
 +
            return false; }]>;
 +
def truncstorev16i16_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v16i16;
 +
          else
 +
            return false; }]>;
 +
 
 +
def truncstorev8i8_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
          else
 +
            return false; }]>;
 +
def truncstorev8i8_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i8;
 +
          else
 +
            return false; }]>;
  
The class provides also flags to capture information about the high-level semantics of the instruction. The ones used in the nu+ back-end are:  
+
def truncstorev8i16_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
*'''isBranch''', equals to 1 if the instruction is a branch.
+
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
*'''isIndirectBranch''', equals to 1 if the instruction is an indirect branch, such as a branch through a register.
+
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i16;
*'''isBarrier''', equals to 1 if the instruction stops control flow from executing the instruction immediately following it. Examples include unconditional branches and return instructions.
+
          else
*'''isTerminator''', equals to 1 if the instruction is a terminator for a basic block.
+
            return false; }]>;
*'''isPseudo''', equals to 1 if the instruction is a pseudo instruction, i.e. an instruction that doesn't correspond to a real machine instruction.
+
def truncstorev8i16_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
*'''isCodeGenOnly''', equals to 1 if the instruction is a pseudo instruction used for codegen modeling purposes.
+
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
*'''isAsmParserOnly''',  equals to 1 if the instruction is a pseudo instruction for use by the assembler parser. In this way the disassembler does not use the asm string of the instruction. This is useful when two or more instructions share the same encoding, thus generating a disassembler conflict.
+
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i16;
*'''isReturn''', equals to 1 if the instruction is a return instruction.
+
          else
*'''isCall''',  equals to 1 if the instruction is a call instruction.
+
            return false; }]>;
  
An instruction can also specify if it implicitly reads or writes non-operand registers, by specifying the registers in the '''Uses''' and
+
def truncstorev8i32_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
'''Defs''' fields. The former contains a list of registers that the instruction uses (reads), the latter contains a list of registers that the instruction defines (writes).
+
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
          else
 +
            return false; }]>;
 +
def truncstorev8i32_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
 +
    [{ if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
 +
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i32;
 +
          else
 +
            return false; }]>;                                                       
  
To handle the nu+ ISA complexity, a hierarchy of classes has been created. Each level of the hierarchy refines an aspect of the nu+ instruction formats. For example the '''FR_TwoOp_Unmasked_32''' class refines the '''FR''' class providing an easy way to define unmasked instructions of type '''R''' that takes two 32-bit operands. The following picture depicts all the classes derived from FR.
+
 +
// insertelt SDNode redefinition
 +
def VecInsert : SDTypeProfile<1, 3, [    // vector insert
 +
  SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
 +
]>;
  
[[File:Formats_hierarchy.jpg|frame|This picture shows how the classes are inherited from the FR class.]]
+
def insert_elt  : SDNode<"ISD::INSERT_VECTOR_ELT", VecInsert>;
  
The instruction formats classes are then used to create instruction ''multiclasses''. In this way all the possible variants are generated with a single instruction definition.
+
//===----------------------------------------------------------------------===//
An example is the '''FArithInt_TwoOp''' multiclass. It is used for arithmetic instructions with two integer operands. When a FArithInt_TwoOp instruction is defined, TableGen automatically instantiates all the possible variations according to the classes contained in the multiclass definition.
+
//  Describe NaplesPU Special Registers
 +
//
 +
//
 +
//===----------------------------------------------------------------------===//
  
However, there is also a '''Pseudo''' class which can be used for nodes that cannot be translated into machine nodes through a pattern but require other transformations.
+
class SpReg<bits<6> reg> {
 +
  bits<6> Register = reg;
 +
}
  
== Pattern Fragments ==
+
def MaskReg : SpReg<59>;
The file also defines custom pattern fragments (the default ones are included in the file ''TargetSelectionDAG.td'') used to help LLVM match LLVM IR patterns. A '''pattern fragment''', represented by the class '''PatFrag''', can match something on the DAG, from a single node to multiple nested fragments, by specifying the input '''operands''', the '''dag fragment''' to match that must satisfy a '''predicate''' (if applicable, default none) and even the '''transformation''' to perform through an '''SDNodeXForm''' (if applicable, default NOOP_SDNodeXForm).
 
  
 +
//===----------------------------------------------------------------------===//
 +
//  Describe NaplesPU scalar or vector instructions
 +
//
 +
//  Fmt  - 0 if a register is scalar, 1 if vector
 +
//===----------------------------------------------------------------------===//
  
<syntaxhighlight lang="c" line='line'>
+
class Fmt<bit val> {
class PatFrag<dag ops, dag frag, code pred = [{}],
+
   bit Value = val;
              SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
 
  dag Operands = ops;
 
  dag Fragment = frag;
 
  code PredicateCode = pred;
 
  code ImmediateCode = [{}];
 
   SDNodeXForm OperandTransform = xform;
 
 
}
 
}
</syntaxhighlight>
 
  
As an example, let's consider the nu+ load-store pattern fragments. Since nu+ has two address spaces, the '''main memory''' and the '''scratchpad memory''', these pattern fragments are used to detect where loads and stores are directed. This is done by specifying a predicate (written in C++) that checks the associated address space.
+
def Fmt_S : Fmt<0>;
 +
def Fmt_V : Fmt<1>;
  
<syntaxhighlight lang="c" line='line'>
+
//===----------------------------------------------------------------------===//
def MemStore : PatFrag<(ops node:$val, node:$ptr),
+
// Describe NaplesPU instructions format here
                      (store node:$val, node:$ptr), [{
+
//===----------------------------------------------------------------------===//
              if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
 
                  return !cast<StoreSDNode>(N)->isTruncatingStore();
 
              else
 
                  return false;}]>;
 
  
def ScratchpadStore : PatFrag<(ops node:$val, node:$ptr),
+
class InstNaplesPU<dag outs, dag ins, string asmstr, list<dag> pattern>
                              (store node:$val, node:$ptr), [{
+
          : Instruction {
              if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
+
  field bits<32> Inst;
                return !cast<StoreSDNode>(N)->isTruncatingStore();
 
              else
 
                return false;}]>;
 
</syntaxhighlight>
 
  
Starting from the PatFrag class, other useful classes are derived, such as the '''OutPatFrag''' class and the '''PatLeaf''' class. The '''OutPatFrag''' class is a pattern fragment that has no predicates or transforms; it is used to avoid repeated subexpressions in output patterns.
+
  let Namespace = "NaplesPU";
The '''PatLeaf''' class is a pattern fragment that has no operands and is used as a helper.
+
  let Size = 4;
  
<syntaxhighlight lang="c" line='line'>
+
  dag OutOperandList = outs;
class OutPatFrag<dag ops, dag frag>
+
  dag InOperandList = ins;
        : PatFrag<ops, frag, [{}], NOOP_SDNodeXForm>;
+
  let AsmString  = asmstr;
 +
  let Pattern = pattern;
  
class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
+
  //let DecoderNamespace = "NaplesPU";
        : PatFrag<(ops), frag, pred, xform>;
+
  field bits<32> SoftFail = 0;
</syntaxhighlight>
+
}
  
In the nu+ backend, the '''OutPatFrag''' class is used to help with the extraction and insertion of sub-registers.
 
  
<syntaxhighlight lang="c" line='line'>
+
//===----------------------------------------------------------------------===//
def GetEvenReg: OutPatFrag<(ops node:$Rs),
+
// Format R instruction class in NaplesPU : <00|opcode|rd|rs0|rs1|unused|l|fmt|m|>
                          (EXTRACT_SUBREG (i64 $Rs), sub_even)>;
+
// l: if 1, 64 bit mode
 +
// fmt2: FMT value for rd register
 +
// fmt1: FMT value for rs0 register
 +
// fmt0: FMT value for rs1 register
 +
// m: if 1, masked
 +
//
 +
//===----------------------------------------------------------------------===//
  
def GetOddReg: OutPatFrag<(ops node:$Rs),
+
class FR<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0, bit m>
                          (EXTRACT_SUBREG (i64 $Rs), sub_odd)>;
+
  : InstNaplesPU<outs, ins, asmstr, pattern> {
 +
  bits <6> dst;
 +
  bits <6> src0;
  
def SetEvenReg: OutPatFrag<(ops node:$Rs),
+
  let Inst{31-30} = 0;
                          (i64 (SUBREG_TO_REG (i64 0), (i32 $Rs), sub_even))>;
+
  let Inst{29-24} = opcode;
 +
  let Inst{23-18} = dst;
 +
  let Inst{17-12} = src0;
 +
  let Inst{5} = 0; //unused
 +
  let Inst{4} = l;
 +
  let Inst{3} = fmt2.Value;
 +
  let Inst{2} = fmt1.Value;
 +
  let Inst{1} = fmt0.Value;
 +
  let Inst{0} = m;
 +
}
  
def SetOddReg: OutPatFrag<(ops node:$Rs),
+
class FR_TwoOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0, bit m>
                          (i64 (SUBREG_TO_REG (i64 0), (i32 $Rs), sub_odd))>;
+
  : FR<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, m> {
</syntaxhighlight>
+
  bits <6> src1;
  
In the nu+ backend '''PatLeaf''' is used to define 16-bit and 9-bit immediates.
+
  let Inst{11-6} = src1;
 +
}
  
<syntaxhighlight lang="c" line='line'>
+
class FR_OneOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0, bit m>
 +
  : FR<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, m> {
  
def simm16 : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
+
  let Inst{11-6} = 0;
 +
}
  
def simm9 : PatLeaf<(imm), [{ return isInt<9>(N->getSExtValue()); }]>;
+
class FR_TwoOp_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0>
</syntaxhighlight>
+
  : FR_TwoOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, 1> {
 +
  let Uses = [MR_REG];
 +
}
 +
 
 +
class FR_TwoOp_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0>
 +
  : FR_TwoOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, 0> {
  
The '''SDNodeXForm''' class, mentioned above, is provided in order to manipulate nodes in the output DAG once a match has been found and is typically used to manipulate immediate values.
+
}
As an example, the '''LO32I''' transformation node is used to take the 32 least significant bits from 64-bit integer immediates.
 
  
<syntaxhighlight lang="c" line='line'>
+
class FR_OneOp_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1>
def LO32I : SDNodeXForm<imm, [{
+
  : FR_OneOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, Fmt_S, 1> {
            return CurDAG->getTargetConstant((unsigned)N->getAPIntValue().getLoBits(32).getZExtValue(), SDLoc(N), MVT::i32);}]>;
+
  let Uses = [MR_REG];
</syntaxhighlight>
+
}
 +
 
 +
class FR_OneOp_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1>
 +
  : FR_OneOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, Fmt_S, 0> {
 +
 
 +
}
 +
 
 +
class FR_TwoOp_Masked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
 +
  : FR_TwoOp_Masked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1, fmt0> {
 +
 
 +
}
 +
 
 +
class FR_TwoOp_Masked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
 +
  : FR_TwoOp_Masked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1, fmt0> {
 +
 
 +
}
 +
 
 +
class FR_TwoOp_Unmasked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
 +
  : FR_TwoOp_Unmasked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1, fmt0> {
 +
 
 +
}
 +
 
 +
class FR_TwoOp_Unmasked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
 +
  : FR_TwoOp_Unmasked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1, fmt0> {
 +
 
 +
}
 +
 
 +
class FR_OneOp_Masked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
 +
  : FR_OneOp_Masked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1> {
 +
 
 +
}
 +
 
 +
class FR_OneOp_Masked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
 +
  : FR_OneOp_Masked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1> {
 +
 
 +
}
 +
 
 +
class FR_OneOp_Unmasked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
 +
  : FR_OneOp_Unmasked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1> {
 +
 
 +
}
 +
 
 +
class FR_OneOp_Unmasked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
 +
  : FR_OneOp_Unmasked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1> {
 +
 
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Format I instruction class in NaplesPU : <010|opcode|rd|rs|imm|fmt|m|>
 +
// fmt1: FMT value for rd register
 +
// fmt0: FMT value for rs register
 +
// m: if 1 masked
 +
//===----------------------------------------------------------------------===//
 +
 
 +
class FI<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0, bit m>
 +
    : InstNaplesPU<outs, ins, asmstr, pattern> {
 +
  bits <6> dst;
 +
  bits <9> imm;
 +
 
 +
  let Inst{31-29} = 0b010;
 +
  let Inst{28-24} = opcode;
 +
  let Inst{23-18} = dst;
 +
  let Inst{11-3} = imm;
 +
  let Inst{2} = fmt1.Value;
 +
  let Inst{1} = fmt0.Value;
 +
  let Inst{0} = m;
 +
}
 +
 
 +
class FI_OneOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0, bit m> : FI<outs, ins, asmstr, pattern, opcode, fmt1, fmt0, m> {
 +
  bits <6> src;
 +
 
 +
  let Inst{17-12} = src;
 +
}
 +
 
 +
class FI_NoOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1> : FI<outs, ins, asmstr, pattern, opcode, fmt1, Fmt_S, 0> {
 +
  let Inst{17-12} = 0;
 +
}
 +
 
 +
class FI_OneOp_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0> : FI_OneOp<outs, ins, asmstr, pattern, opcode, fmt1, fmt0, 1> {
 +
  let Uses = [MR_REG];
 +
}
 +
 
 +
class FI_OneOp_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0> : FI_OneOp<outs, ins, asmstr, pattern, opcode, fmt1, fmt0, 0> {
 +
 
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Format MOVEI instruction class in NaplesPU : <01100|opcode|rd|imm|fmt|m|>
 +
//===----------------------------------------------------------------------===//
 +
 
 +
class FMOVEI<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode, Fmt fmt, bit m>
 +
      : InstNaplesPU<outs, ins, asmstr, pattern> {
 +
  bits <6> dst;
 +
  bits <16> imm;
 +
 
 +
  let Inst{31-27} = 0b01100;
 +
  let Inst{26-24} = opcode;
 +
  let Inst{23-18} = dst;
 +
  let Inst{17-2} = imm;
 +
  let Inst{1} = fmt.Value;
 +
  let Inst{0} = m;
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Format M instruction class in NaplesPU : <10|opcode|rd/rs|rptr|off|l|s|m|>
 +
//===----------------------------------------------------------------------===//
 +
 
 +
class FM<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, bit s, bit m>
 +
    : InstNaplesPU<outs, ins, asmstr, pattern> {
 +
  bits <6> dstsrc;
 +
  bits <15> addr; //base address and offset encoded on the same 15 bits value (check encodeMemoryOpValue)
 +
 
 +
  let Inst{31-30} = 0b10;
 +
  let Inst{29-24} = opcode;
 +
  let Inst{23-18} = dstsrc;
 +
  let Inst{17-12} = addr{5-0};
 +
  let Inst{11-3} = addr{14-6};
 +
  let Inst{2} = l;
 +
  let Inst{1} = s;
 +
  let Inst{0} = m;
 +
}
 +
 
 +
class FM_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, bit s>
 +
  : FM<outs, ins, asmstr, pattern, opcode, l, s, 0> {
 +
}
 +
 
 +
class FM_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, bit s>
 +
  : FM<outs, ins, asmstr, pattern, opcode, l, s, 1> {
 +
  let Uses = [MR_REG];
 +
}
 +
 
 +
class FM_Unmasked_Mainmem<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
 +
  : FM_Unmasked<outs, ins, asmstr, pattern, opcode, l, 0> {
 +
}
 +
 
 +
class FM_Unmasked_Scratchpad<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
 +
  : FM_Unmasked<outs, ins, asmstr, pattern, opcode, l, 1> {
 +
}
 +
 
 +
class FM_Masked_Mainmem<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
 +
  : FM_Masked<outs, ins, asmstr, pattern, opcode, l, 0> {
 +
}
 +
 
 +
class FM_Masked_Scratchpad<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
 +
  : FM_Masked<outs, ins, asmstr, pattern, opcode, l, 1> {
 +
}
 +
 
 +
class FM_Unmasked_Mainmem_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Unmasked_Mainmem<outs, ins, asmstr, pattern, opcode, 0> {
 +
}
 +
 
 +
class FM_Unmasked_Scratchpad_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Unmasked_Scratchpad<outs, ins, asmstr, pattern, opcode, 0> {
 +
}
 +
 
 +
class FM_Masked_Mainmem_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Masked_Mainmem<outs, ins, asmstr, pattern, opcode, 0> {
 +
}
 +
 
 +
class FM_Masked_Scratchpad_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Masked_Scratchpad<outs, ins, asmstr, pattern, opcode, 0> {
 +
}
 +
 
 +
class FM_Unmasked_Mainmem_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Unmasked_Mainmem<outs, ins, asmstr, pattern, opcode, 1> {
 +
}
 +
 
 +
class FM_Unmasked_Scratchpad_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Unmasked_Scratchpad<outs, ins, asmstr, pattern, opcode, 1> {
 +
}
 +
 
 +
class FM_Masked_Mainmem_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Masked_Mainmem<outs, ins, asmstr, pattern, opcode, 1> {
 +
}
 +
 
 +
class FM_Masked_Scratchpad_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
 +
  : FM_Masked_Scratchpad<outs, ins, asmstr, pattern, opcode, 1> {
 +
}
 +
 
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Format J/BR instruction class in NaplesPU
 +
// FJR: <0111|type(0/1)|opcode|rd|imm|>
 +
// FJ:  <0111|type(0/1)|opcode|imm|>
 +
//===----------------------------------------------------------------------===//
 +
 
 +
class FJ_ALL<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
 +
    : InstNaplesPU<outs, ins, asmstr, pattern> {
 +
 
 +
  let Inst{31-28} = 0b0111;
 +
  let Inst{26-24} = opcode;
 +
  let isBranch = 1;
 +
}
 +
 
 +
class FJR<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
 +
    : FJ_ALL<outs, ins, asmstr, pattern, opcode> {
 +
  bits <6> cond;
 +
  bits <18> addr;
 +
 
 +
  let Inst{27} = 0;
 +
  let Inst{23-18} = cond;
 +
  let Inst{17-0} = addr;
 +
}
 +
 
 +
class FJ<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
 +
    : FJ_ALL<outs, ins, asmstr, pattern, opcode> {
 +
  bits <24> addr;
 +
 
 +
  let Inst{27} = 1;
 +
  let Inst{23-0} = addr;
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Format C instruction class in NaplesPU
 +
// FC:  <01101|opcode|rs0|rs1|unused|>
 +
//===----------------------------------------------------------------------===//
 +
 
 +
class FC<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
 +
    : InstNaplesPU<outs, ins, asmstr, pattern> {
 +
 
 +
  bits <6> src0;
 +
  bits <6> src1;
 +
 
 +
  let Inst{31-27} = 0b01101;
 +
  let Inst{26-24} = opcode;
 +
  let Inst{23-18} = src0;
 +
  let Inst{17-12} = src1;
 +
  let Inst{11-0} = 0; //unused
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
//  A set of multiclasses is used to handle Vector/Scalar combinations
 +
//  SS: Scalar = Op Scalar
 +
//  VV: Vector = Op Vector
 +
//  SI: Vector = Op Immediate
 +
//  SSS: Scalar = Scalar Op Scalar
 +
//  VVS: Vector = Vector Op Scalar
 +
//  VVV: Vector = Vector Op Vector
 +
//  SVV: Scalar = Vector Op Vector
 +
//  SSI: Scalar = Vector Op Immediate
 +
//  VVI: Scalar = Vector Op Immediate
 +
//===----------------------------------------------------------------------===//
 +
 
 +
multiclass FArithInt_TwoOp<string operator, SDNode OpNode, bits<6> opcode> {
 +
  // FR - SSS - 32 bit integer
 +
  def SSS_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0, GPR32:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (OpNode i32:$src0, i32:$src1))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - VVS unmasked - 32 bit integer
 +
  def VVS_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, GPR32:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_S>;
 +
 
 +
  // FR - VVV unmasked - 32 bit integer
 +
  def VVV_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, VR512W:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src0, v16i32:$src1))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    // FR - VVS masked - 32 bit integer
 +
    def VVS_M_32 : FR_TwoOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, GPR32:$src1, VR512W:$oldvalue),
 +
      operator # "_i32.m $dst, $src0, $src1",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V,
 +
      Fmt_S>;
 +
 
 +
    // FR - VVV masked - 32 bit integer
 +
    def VVV_M_32 : FR_TwoOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, VR512W:$src1, VR512W:$oldvalue),
 +
      operator # "_i32.m $dst, $src0, $src1",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, v16i32:$src1),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V,
 +
      Fmt_V>;
 +
 
 +
  }
 +
  // FI - SSI
 +
  def SSI : FI_OneOp_Unmasked<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src, SIMM9OP:$imm),
 +
    operator # "i $dst, $src, $imm",
 +
    [(set i32:$dst, (OpNode i32:$src, (i32 simm9:$imm)))],
 +
    opcode{4-0},
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FI - VVI unmasked
 +
  def VVI_U : FI_OneOp_Unmasked<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src, SIMM9OP:$imm),
 +
    operator # "i $dst, $src, $imm",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))))],
 +
    opcode{4-0},
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  // FI - VVI masked
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    def VVI_M : FI_OneOp_Masked<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src, SIMM9OP:$imm, VR512W:$oldvalue),
 +
    operator # "i.m $dst, $src, $imm",
 +
    [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))), v16i32:$oldvalue))],
 +
    opcode{4-0},
 +
    Fmt_V,
 +
    Fmt_V>;
 +
  }
 +
}
 +
 
 +
multiclass FArithInt_OneOp<string operator, SDNode OpNode, bits<6> opcode> {
 +
  // FR - SS - 32 bit integer
 +
  def SS_32 : FR_OneOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0),
 +
    operator # "_i32 $dst, $src0",
 +
    [(set i32:$dst, (OpNode i32:$src0))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - VV unmasked - 32 bit integer
 +
  def VV_U_32 : FR_OneOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0),
 +
    operator # "_i32 $dst, $src0",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src0))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    // FR - VV masked - 32 bit integer
 +
    def VV_M_32 : FR_OneOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, VR512W:$oldvalue),
 +
      operator # "_i32.m $dst, $src0",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V>;
 +
 
 +
  }
 +
}
 +
 
 +
// per la shift rotate
 +
multiclass FSRInt_TwoOp<string operator, SDNode OpNode, bits<6> opcode> {
 +
  // FR - SSS - 32 bit integer
 +
  def SSS_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0, GPR32:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (OpNode i32:$src0, i32:$src1))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - VVS unmasked - 32 bit integer
 +
  def VVS_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, GPR32:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_S>;
 +
 
 +
  // FR - VVV unmasked - 32 bit integer
 +
  def VVV_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, VR512W:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src0, v16i32:$src1))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    // FR - VVS masked - 32 bit integer
 +
    def VVS_M_32 : FR_TwoOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, GPR32:$src1, VR512W:$oldvalue),
 +
      operator # "_i32.m $dst, $src0, $src1",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V,
 +
      Fmt_S>;
 +
 
 +
    // FR - VVV masked - 32 bit integer
 +
    def VVV_M_32 : FR_TwoOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, VR512W:$src1, VR512W:$oldvalue),
 +
      operator # "_i32.m $dst, $src0, $src1",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, v16i32:$src1),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V,
 +
      Fmt_V>;
 +
 
 +
  }
 +
  // FI - SSI
 +
  def SSI : FI_OneOp_Unmasked<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src, SIMM9OP:$imm),
 +
    operator # "i $dst, $src, $imm",
 +
    [(set i32:$dst, (OpNode i32:$src, (i32 simm9:$imm)))],
 +
    opcode{4-0},
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FI - VVI unmasked
 +
  def VVI_U : FI_OneOp_Unmasked<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src, SIMM9OP:$imm),
 +
    operator # "i $dst, $src, $imm",
 +
    [(set v16i32:$dst, (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))))],
 +
    opcode{4-0},
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  // FI - VVI masked
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    def VVI_M : FI_OneOp_Masked<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src, SIMM9OP:$imm, VR512W:$oldvalue),
 +
    operator # "i.m $dst, $src, $imm",
 +
    [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))), v16i32:$oldvalue))],
 +
    opcode{4-0},
 +
    Fmt_V,
 +
    Fmt_V>;
 +
  }
 +
}
 +
 
 +
multiclass FArithFloat_TwoOp<string operator, SDNode OpNode, bits<6> opcode> {
 +
  // FR - SSS - 32 bit float
 +
  def SSS_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0, GPR32:$src1),
 +
    operator # "_f32 $dst, $src0, $src1",
 +
    [(set f32:$dst, (OpNode f32:$src0, f32:$src1))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - VVS unmasked - 32 bit float
 +
  def VVS_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, GPR32:$src1),
 +
    operator # "_f32 $dst, $src0, $src1",
 +
    [(set v16f32:$dst, (OpNode v16f32:$src0, (splat f32:$src1)))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_S>;
 +
 
 +
  // FR - VVV unmasked - 32 bit float
 +
  def VVV_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, VR512W:$src1),
 +
    operator # "_f32 $dst, $src0, $src1",
 +
    [(set v16f32:$dst, (OpNode v16f32:$src0, v16f32:$src1))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    // FR - VVS masked - 32 bit float
 +
    def VVS_M_32 : FR_TwoOp_Masked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, GPR32:$src1, VR512W:$oldvalue),
 +
    operator # "_f32.m $dst, $src0, $src1",
 +
    [(set v16f32:$dst, (int_npu_vector_mixf32 (OpNode v16f32:$src0, (splat f32:$src1)),
 +
        v16f32:$oldvalue))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_S>;
 +
 
 +
    // FR - VVV masked - 32 bit float
 +
    def VVV_M_32 : FR_TwoOp_Masked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0, VR512W:$src1, VR512W:$oldvalue),
 +
    operator # "_f32.m $dst, $src0, $src1",
 +
    [(set v16f32:$dst, (int_npu_vector_mixf32 (OpNode v16f32:$src0, v16f32:$src1),
 +
        v16f32:$oldvalue))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
}
 +
}
 +
 
 +
multiclass FArithFloat_OneOp<string operator, SDNode OpNode, bits<6> opcode> {
 +
  // FR - SS - 32 bit float
 +
  def SS_32 : FR_OneOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0),
 +
    operator # "_f32 $dst, $src0",
 +
    [(set f32:$dst, (OpNode f32:$src0))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - VV unmasked - 32 bit float
 +
  def VV_U_32 : FR_OneOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0),
 +
    operator # "_f32 $dst, $src0",
 +
    [(set v16f32:$dst, (OpNode v16f32:$src0))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    // FR - VV masked - 32 bit float
 +
    def VV_M_32 : FR_OneOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, VR512W:$oldvalue),
 +
      operator # "_f32.m $dst, $src0",
 +
      [(set v16f32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V>;
 +
}
 +
}
 +
 
 +
// Condition codes defined in include/llvm/CodeGen/ISDOpcodes.h
 +
// VS and VV comparisons are handled through intrinsics
 +
multiclass FCompInt<string operator, CondCode condition,
 +
  bits<6> opcode, Intrinsic vectorIntr32> {
 +
  // FR - SSS - 32 bit integer
 +
  def SSS_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0, GPR32:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (setcc i32:$src0, i32:$src1, condition))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - SVS unmasked - 32 bit integer
 +
  def SVS_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins VR512W:$src0, GPR32:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (vectorIntr32 v16i32:$src0, (splat i32:$src1)))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_V,
 +
    Fmt_S>;
 +
 
 +
  // FR - SVV unmasked - 32 bit integer
 +
  def SVV_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins VR512W:$src0, VR512W:$src1),
 +
    operator # "_i32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (vectorIntr32 v16i32:$src0, v16i32:$src1))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  // FI - SSI
 +
  def SSI : FI_OneOp_Unmasked<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src, SIMM9OP:$imm),
 +
    operator # "i $dst, $src, $imm",
 +
    [(set i32:$dst, (setcc i32:$src, simm9:$imm, condition))],
 +
    opcode{4-0},
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FI - SVI unmasked
 +
  def SVI : FI_OneOp_Unmasked<
 +
    (outs GPR32:$dst),
 +
    (ins VR512W:$src, SIMM9OP:$imm),
 +
    operator # "i $dst, $src, $imm",
 +
    [(set i32:$dst, (vectorIntr32 v16i32:$src, (splat simm9:$imm)))],
 +
    opcode{4-0},
 +
    Fmt_S,
 +
    Fmt_V>;
 +
}
 +
 
 +
multiclass FCompFloat<string operator, SDNode OpNode,
 +
  bits<6> opcode, Intrinsic vectorIntr32> {
 +
  // FR - SSS - 32 bit float
 +
  def SSS_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0, GPR32:$src1),
 +
    operator # "_f32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (OpNode f32:$src0, f32:$src1))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - SVS unmasked - 32 bit float
 +
  def SVS_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins VR512W:$src0, GPR32:$src1),
 +
    operator # "_f32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (vectorIntr32 v16f32:$src0, (splat f32:$src1)))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_V,
 +
    Fmt_S>;
 +
 
 +
  // FR - SVV unmasked - 32 bit float
 +
  def SVV_U_32 : FR_TwoOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins VR512W:$src0, VR512W:$src1),
 +
    operator # "_f32 $dst, $src0, $src1",
 +
    [(set i32:$dst, (vectorIntr32 v16f32:$src0, v16f32:$src1))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
}
 +
 
 +
multiclass FSext_32<string operator, ValueType vt,
 +
  bits<6> opcode, ValueType vt_v> {
 +
  // FR - SS
 +
  def SS : FR_OneOp_Unmasked_32<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$src0),
 +
    operator # "_i32 $dst, $src0",
 +
    [(set i32:$dst, (sext_inreg i32:$src0, vt))],
 +
    opcode,
 +
    Fmt_S,
 +
    Fmt_S>;
 +
 
 +
  // FR - VV unmasked
 +
  def VV_U : FR_OneOp_Unmasked_32<
 +
    (outs VR512W:$dst),
 +
    (ins VR512W:$src0),
 +
    operator # "_i32 $dst, $src0",
 +
    [(set v16i32:$dst, (sext vt_v:$src0))],
 +
    opcode,
 +
    Fmt_V,
 +
    Fmt_V>;
 +
 
 +
  let Constraints = "$dst = $oldvalue" in {
 +
    // FR - VV masked
 +
    def VV_M : FR_OneOp_Masked_32<
 +
      (outs VR512W:$dst),
 +
      (ins VR512W:$src0, VR512W:$oldvalue),
 +
      operator # "_i32.m $dst, $src0",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (sext vt_v:$src0),
 +
        v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      Fmt_V>;
 +
  }
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
//  A set of multiclasses used to handle Loads and Stores
 +
//===----------------------------------------------------------------------===//
 +
 
 +
// Scalar LOAD
 +
multiclass FMLoadScalar_32<string suffix, PatFrag op_mem, PatFrag op_scratch, bits<6> opcode> {
 +
 
 +
  def _Mainmem : FM_Unmasked_Mainmem<
 +
    (outs GPR32:$dstsrc),  
 +
    (ins MEMri:$addr),  
 +
    "load32" # suffix # " $dstsrc, $addr",
 +
    [(set i32:$dstsrc, (i32 (op_mem ADDRri:$addr)))],
 +
    opcode,
 +
    0>;
 +
 
 +
  def _Scratchpad : FM_Unmasked_Scratchpad<
 +
    (outs GPR32:$dstsrc),
 +
    (ins MEMri:$addr),
 +
    "load32" # suffix # "_scratchpad $dstsrc, $addr",
 +
    [(set i32:$dstsrc, (i32 (op_scratch ADDRri:$addr)))],
 +
    opcode,
 +
    0>;
 +
 
 +
}
 +
 
 +
// Scalar STORE
 +
multiclass FMStoreScalar_32<string suffix, PatFrag op_mem, PatFrag op_scratch, bits<6> opcode> {
 +
 
 +
  def _Mainmem : FM_Unmasked_Mainmem<
 +
    (outs),
 +
    (ins GPR32:$dstsrc, MEMri:$addr),
 +
    "store32" # suffix # " $dstsrc, $addr",
 +
    [(op_mem i32:$dstsrc, ADDRri:$addr)],
 +
    opcode,
 +
    0>;
 +
 
 +
  def _Scratchpad : FM_Unmasked_Scratchpad<
 +
    (outs),
 +
    (ins GPR32:$dstsrc, MEMri:$addr),
 +
    "store32" # suffix # "_scratchpad $dstsrc, $addr",
 +
    [(op_scratch i32:$dstsrc, ADDRri:$addr)],
 +
    opcode,
 +
    0>{
 +
        let hasSideEffects = 1;
 +
        let mayStore = 1;
 +
      }
 +
}
 +
 
 +
// Vector LOAD
 +
multiclass FMLoadVector_32<string suffix, PatFrag op_Umem, PatFrag op_Uscratch,
 +
                          Intrinsic op_Mmem, Intrinsic op_Mscratch, bits<6> opcode> {
 +
 
 +
  // main memory - unmasked - 32
 +
  def Mainmem_U : FM_Unmasked_Mainmem_32<
 +
    (outs VR512W:$dstsrc),
 +
    (ins MEMri:$addr),
 +
    "load" # suffix # " $dstsrc, $addr",
 +
    [(set v16i32:$dstsrc, (op_Umem ADDRri:$addr))],
 +
    opcode>;
 +
 
 +
  // scratchpad memory - unmasked - 32
 +
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
 +
    (outs VR512W:$dstsrc),
 +
    (ins MEMri:$addr),
 +
    "load" # suffix # "_scratchpad $dstsrc, $addr",
 +
    [(set v16i32:$dstsrc, (op_Uscratch ADDRri:$addr))],
 +
    opcode>;
 +
 
 +
  // main memory - masked - 32
 +
  def Mainmem_M : FM_Masked_Mainmem_32<
 +
    (outs VR512W:$dstsrc),
 +
    (ins MEMri:$addr),
 +
    "load" # suffix # ".m $dstsrc, $addr",
 +
    [(set v16i32:$dstsrc, (op_Mmem ADDRri:$addr))],
 +
    opcode>;
 +
 
 +
  // scratchpad memory - masked - 32
 +
  def Scratchpad_M : FM_Masked_Scratchpad_32<
 +
    (outs VR512W:$dstsrc),
 +
    (ins MEMri:$addr),
 +
    "load" # suffix # "_scratchpad.m $dstsrc, $addr",
 +
    [(set v16i32:$dstsrc, (op_Mscratch ADDRri:$addr))],
 +
    opcode>;
 +
}
 +
 
 +
// Vector GATHER
 +
multiclass FMGather_32<string suffix, Intrinsic op_Uscratch,
 +
                          Intrinsic op_Mscratch, bits<6> opcode> {
 +
 
 +
  // scratchpad memory - unmasked - 32
 +
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
 +
    (outs VR512W:$dstsrc),
 +
    (ins V16MEMri:$addr),
 +
    "loadg" # suffix # "_scratchpad $dstsrc, $addr",
 +
    [(set v16i32:$dstsrc, (op_Uscratch V16ADDRri:$addr))],
 +
    opcode>;
 +
  // scratchpad memory - masked - 32
 +
  def Scratchpad_M : FM_Masked_Scratchpad_32<
 +
    (outs VR512W:$dstsrc),
 +
    (ins V16MEMri:$addr),
 +
    "loadg" # suffix # "_scratchpad.m $dstsrc, $addr",
 +
    [(set v16i32:$dstsrc, (op_Mscratch V16ADDRri:$addr))],
 +
    opcode>;
 +
}
 +
 
 +
// Vector STORE
 +
multiclass FMStoreVector_32<string suffix, PatFrag op_Umem, PatFrag op_Uscratch,
 +
                          Intrinsic op_Mmem, Intrinsic op_Mscratch, bits<6> opcode> {
 +
 
 +
  // main memory - unmasked - 32
 +
  def Mainmem_U : FM_Unmasked_Mainmem_32<
 +
    (outs),
 +
    (ins VR512W:$dstsrc, MEMri:$addr),
 +
    "store" # suffix # " $dstsrc, $addr",
 +
    [(op_Umem v16i32:$dstsrc, ADDRri:$addr)],
 +
    opcode>;
 +
  // scratchpad memory - unmasked - 32
 +
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
 +
    (outs),
 +
    (ins VR512W:$dstsrc, MEMri:$addr),
 +
    "store" # suffix # "_scratchpad $dstsrc, $addr",
 +
    [(op_Uscratch v16i32:$dstsrc, ADDRri:$addr)],
 +
    opcode>;
 +
  // main memory - masked - 32
 +
  def Mainmem_M : FM_Masked_Mainmem_32<
 +
    (outs),
 +
    (ins VR512W:$dstsrc, MEMri:$addr),
 +
    "store" # suffix # ".m $dstsrc, $addr",
 +
    [(op_Mmem ADDRri:$addr, v16i32:$dstsrc)],
 +
    opcode>;
 +
  // scratchpad memory - masked - 32
 +
  def Scratchpad_M : FM_Masked_Scratchpad_32<
 +
    (outs),
 +
    (ins VR512W:$dstsrc, MEMri:$addr),
 +
    "store" # suffix # "_scratchpad.m $dstsrc, $addr",
 +
    [(op_Mscratch ADDRri:$addr, v16i32:$dstsrc)],
 +
    opcode>;
 +
}
 +
 
 +
// Vector SCATTER
 +
multiclass FMScatter_32<string suffix, Intrinsic op_Uscratch,
 +
                          Intrinsic op_Mscratch, bits<6> opcode> {
 +
 
 +
  // scratchpad memory - unmasked - 32
 +
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
 +
    (outs),
 +
    (ins VR512W:$dstsrc, V16MEMri:$addr),
 +
    "stores" # suffix # "_scratchpad $dstsrc, $addr",
 +
    [(op_Uscratch V16ADDRri:$addr, v16i32:$dstsrc)],
 +
    opcode>;
 +
 
 +
  // scratchpad memory - masked - 32
 +
  def Scratchpad_M : FM_Masked_Scratchpad_32<
 +
    (outs),
 +
    (ins VR512W:$dstsrc, V16MEMri:$addr),
 +
    "stores" # suffix # "_scratchpad.m $dstsrc, $addr",
 +
    [(op_Mscratch V16ADDRri:$addr, v16i32:$dstsrc)],
 +
    opcode>;
 +
}
 +
 
 +
//===----------------------------------------------------------------------===//
 +
//  A set of multiclasses is used to handle Vector/Scalar
 +
//  Masked/Unmasked combinations
 +
//  MOVEI operations
 +
//===----------------------------------------------------------------------===//
 +
 
 +
multiclass FMOVEI_ALL<string operator, bits<3> opcode> {
 +
  // SI
 +
  def SI : FMOVEI<
 +
    (outs GPR32:$dst),
 +
    (ins SIMM16OP:$imm),
 +
    operator # " $dst, $imm",
 +
    [],//[(set i32:$dst, simm16:$imm)],
 +
    opcode,
 +
    Fmt_S,
 +
    0>;
 +
 
 +
  // VI unmasked
 +
  def VI_U : FMOVEI<
 +
    (outs VR512W:$dst),
 +
    (ins SIMM16OP:$imm),
 +
    operator # " $dst, $imm",
 +
    [],//[(set v16i32:$dst, (splat simm16:$imm))],
 +
    opcode,
 +
    Fmt_V,
 +
    0>;
 +
 
 +
let Constraints = "$dst = $oldvalue", Uses = [MR_REG] in {
 +
    // VI masked
 +
    def VI_M : FMOVEI<
 +
      (outs VR512W:$dst),
 +
      (ins SIMM16OP:$imm, VR512W:$oldvalue),
 +
      operator # ".m $dst, $imm",
 +
      [(set v16i32:$dst, (int_npu_vector_mixi32 (splat simm16:$imm), v16i32:$oldvalue))],
 +
      opcode,
 +
      Fmt_V,
 +
      1>;
 +
  }
 +
}
 +
 
 +
 
 +
 
 +
//===----------------------------------------------------------------------===//
 +
// Instruction class used to read/write special registers through intrinsics
 +
// All these instructions are implemented using a move instruction
 +
//===----------------------------------------------------------------------===//
 +
let DecoderNamespace = "Read_SPR" in {
 +
  class READ_SPR<SpReg reg, string operator, Intrinsic read_intr> :
 +
                FR_OneOp_Unmasked_32<
 +
                  (outs GPR32:$dst),
 +
                  (ins),
 +
                  operator # " $dst",
 +
                  [(set i32:$dst, (read_intr))],
 +
                  32,
 +
                  Fmt_S,
 +
                  Fmt_S>
 +
  {
 +
    bits<6> dst;
 +
 
 +
    let Inst{29-24} = 32; // opcode: move
 +
    let Inst{23-18} = dst;
 +
    let Inst{17-12} = reg.Register;
 +
    let Inst{11-6} = 0;
 +
  }
 +
}
 +
let DecoderNamespace = "Write_SPR" in {
 +
  class WRITE_SPR<SpReg reg, string operator, Intrinsic read_intr> :
 +
                FR_OneOp_Unmasked_32<
 +
                  (outs),
 +
                  (ins GPR32:$src),
 +
                  operator # " $src",
 +
                  [(read_intr i32:$src)],
 +
                  32,
 +
                  Fmt_S,
 +
                  Fmt_S>
 +
  {
 +
    bits<6> src;
 +
 
 +
    let Inst{29-24} = 32; // opcode: move
 +
    let Inst{23-18} = reg.Register;
 +
    let Inst{17-12} = src;
 +
    let Inst{11-6} = 0;
 +
  }
 +
}
  
For more complex patterns that require pattern matching code in C++, LLVM provides the '''ComplexPattern''' class. It takes the '''number of operands''' returned by the select function, the '''name of the function''' used to perform the matching (usually defined in the TargetNameDAGToDAGISel class), the '''list of possible root nodes''' of the sub-dags to match and the '''list of possible predicates'''.
 
  
<syntaxhighlight lang="c" line='line'>
+
//===----------------------------------------------------------------------===//
 +
//  Pseudo-instructions for alternate assembly syntax (never used by codegen).
 +
//  These are aliases that require C++ handling to convert to the target
 +
//  instruction, while InstAliases can be handled directly by tblgen.
 +
//===----------------------------------------------------------------------===//
 +
class AsmPseudoInst<dag outs, dag ins, string asm>
 +
  : InstNaplesPU<outs, ins, asm, []> {
 +
  let isPseudo = 1;
 +
}
  
class ComplexPattern<ValueType ty, int numops, string fn,
+
class Pseudo<dag outs, dag ins, list<dag> pattern>
                    list<SDNode> roots = [], list<SDNodeProperty> props = []> {
+
   : InstNaplesPU<outs, ins, "Pseudo", pattern>
   ValueType Ty = ty;
+
{
  int NumOperands = numops;
+
   let isCodeGenOnly = 1;
   string SelectFunc = fn;
+
   let isPseudo = 1;
   list<SDNode> RootNodes = roots;
+
   let Inst{31-0} = 0;
   list<SDNodeProperty> Properties = props;
 
 
}
 
}
</syntaxhighlight>
 
  
In the nu+ backend, this is used for the addressing modes; the '''SelectADDRri''' function is defined in the NuPlusDAGToDAGISel class inside the [[NuPlusISelDAGToDAG.cpp]] file.
+
multiclass AtomicBinary<SDNode OpNode>
 +
{
 +
  def R : Pseudo<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$ptr, GPR32:$amt),
 +
    [(set i32:$dst, (OpNode GPR32:$ptr, GPR32:$amt))]>;
 +
 
 +
  def I : Pseudo<
 +
    (outs GPR32:$dst),
 +
    (ins GPR32:$ptr, SIMM9OP:$amt),
 +
    [(set i32:$dst, (OpNode GPR32:$ptr, simm9:$amt))]>;
 +
}
  
<syntaxhighlight lang="c" line='line'>
 
def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
 
def V16ADDRri : ComplexPattern<v16i32, 2, "SelectADDRri", [], []>;
 
def V8ADDRri : ComplexPattern<v8i64, 2, "SelectADDRri", [], []>;
 
 
</syntaxhighlight>
 
</syntaxhighlight>

Latest revision as of 17:01, 21 June 2019

The NaplesPUInstrFormats.td contains the classes that describe the NaplesPU instruction formats, support classes to facilitate the instructions definition and also the definition nodes which make the pattern recognition easier.

//===-- NaplesPUInstrFormats.td - NaplesPU Instruction Formats ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction Pattern Stuff
//===----------------------------------------------------------------------===//

// Immediate predicates: match only integer immediates that fit in a signed
// 16-bit / 9-bit instruction field.
def simm16  : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
def simm9  : PatLeaf<(imm), [{ return isInt<9>(N->getSExtValue()); }]>;

// Addressing modes as in SPARC: register + immediate offset, matched in C++
// by SelectADDRri. ADDRri covers scalar pointers (and frame indices);
// V16ADDRri/V8ADDRri cover vectors of addresses used by gather/scatter.
def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
def V16ADDRri : ComplexPattern<v16i32, 2, "SelectADDRri", [], []>;
def V8ADDRri : ComplexPattern<v8i64, 2, "SelectADDRri", [], []>;

//===----------------------------------------------------------------------===//
// NaplesPU profiles and nodes
//===----------------------------------------------------------------------===//
// Transformation nodes (SDNodeXForm): rewrite an immediate operand into an
// i32 target constant at selection time.

// Low 32 bits of an integer immediate.
def LO32I : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant((unsigned)N->getAPIntValue().getLoBits(32).getZExtValue(), SDLoc(N), MVT::i32);}]>;

// High 32 bits of an integer immediate.
def HI32I : SDNodeXForm<imm, [{
  // Extract the high 32 bits of the immediate value as an i32 constant.
  return CurDAG->getTargetConstant((unsigned)N->getAPIntValue().getHiBits(32).getZExtValue(), SDLoc(N),  MVT::i32);}]>;

// Low 32 bits of a floating-point immediate's bit pattern.
def LO32F : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant((unsigned)(N->getValueAPF().bitcastToAPInt().getLoBits(32).getZExtValue()), SDLoc(N), MVT::i32);}]>;

// High 32 bits of a floating-point immediate's bit pattern.
def HI32F : SDNodeXForm<fpimm, [{
  // Extract the high 32 bits of the FP immediate's bit pattern as an i32 constant.
  return CurDAG->getTargetConstant((unsigned)(N->getValueAPF().bitcastToAPInt().getHiBits(32).getZExtValue()), SDLoc(N),  MVT::i32);}]>;

// Halve an integer immediate (unsigned division by 2).
def DIV2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant((unsigned)N->getZExtValue() / 2, SDLoc(N), MVT::i32);}]>;

// Moveil/moveih nodes definition, used for globaladdress lowering:
// leah/leal correspond to the instructions that materialize the high and
// low parts of a symbol address.
def leah  : SDNode<"NaplesPUISD::LEAH", SDTypeProfile<1, 1, []>>;
def leal  : SDNode<"NaplesPUISD::LEAL", SDTypeProfile<1, 2, []>>;

// A splat is a vector with the same value in all lanes. Used to handle operations
// with both vector and scalar operands (the scalar is broadcast to a vector).
def splat : SDNode<"NaplesPUISD::SPLAT", SDTypeProfile<1, 1, [SDTCisEltOfVec<1, 0>]>>;

// Function return: chained, may consume glue, variadic (return values).
def return : SDNode<"NaplesPUISD::RET_FLAG", SDTypeProfile<0, 0, []>, 
	[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

// Call profile: variadic operand list, first operand is the i32 target.
def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;

def call : SDNode<"NaplesPUISD::CALL", SDT_SPCall, 
	[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;

//To mark the beginning and end of a call sequence
def SDT_SPCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
 def SDT_SPCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
                            [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
                            [SDNPHasChain, SDNPSideEffect,
                             SDNPOptInGlue, SDNPOutGlue]>; 
//To handle the lack of conditional moves: selcondresult(cond, ..., tval, fval)
//yields operand 2 or operand 3, which must have the same type as the result.
def selcondresult : SDNode<"NaplesPUISD::SEL_COND_RESULT", SDTypeProfile<1, 3, 
							[SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>>;

//===----------------------------------------------------------------------===//
// Operand Definitions
//===----------------------------------------------------------------------===//

// Used for the LEA_Sym, to detect the lea pseudo instruction
def symref : Operand<OtherVT> {}

// Signed 16-bit immediate operand; decoded by decodeSimm16Value.
def SIMM16OP : Operand<i32> {
  let DecoderMethod = "decodeSimm16Value";
}

// Signed 9-bit immediate operand; decoded by decodeSimm9Value.
def SIMM9OP : Operand<i32> {
  let DecoderMethod = "decodeSimm9Value";
}

// AsmParser class shared by all memory operands; parsing is done in C++ by
// ParseMemoryOperand.
def MemAsmOperand : AsmOperandClass {
  let Name = "Mem";
  let ParserMethod = "ParseMemoryOperand";
}

// Scalar memory operand: base register (GPR32) + immediate offset.
def MEMri : Operand<iPTR> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "encodeMemoryOpValue";
  let DecoderMethod = "decodeScalarMemoryOpValue";
  let ParserMatchClass = MemAsmOperand;
  let MIOperandInfo = (ops GPR32, i32imm);
}

// Vector memory operand: a v16i32 vector of addresses (VR512W base) +
// immediate offset; used by the gather/scatter instructions.
def V16MEMri : Operand<v16i32> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "encodeMemoryOpValue";
  let DecoderMethod = "decodeVectorWMemoryOpValue";
  let ParserMatchClass = MemAsmOperand;
  let MIOperandInfo = (ops VR512W, i32imm);
}

// Load-effective-address operand: base register + immediate offset.
def LEAri : Operand<iPTR> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "encodeLEAValue";
  let ParserMatchClass = MemAsmOperand; //TODO: check whether this ParserMatchClass is correct
  let MIOperandInfo = (ops GPR32, i32imm);
}

// Absolute address, high part (immediate only).
def ABSh : Operand<iPTR> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "encodeABShValue";
  let ParserMatchClass = MemAsmOperand; //TODO: check whether this ParserMatchClass is correct
  let MIOperandInfo = (ops i32imm);
}

// Absolute address, low part (immediate only).
def ABSl : Operand<iPTR> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "encodeABSlValue";
  let ParserMatchClass = MemAsmOperand; //TODO: check whether this ParserMatchClass is correct
  let MIOperandInfo = (ops i32imm);
}


// Branch target operand; encoding/decoding shared with calltarget.
def brtarget : Operand<OtherVT>
{
  let EncoderMethod = "encodeBranchTargetOpValue";
  let DecoderMethod = "decodeBranchTargetOpValue";
}

// Call target operand (a pointer-typed branch target).
def calltarget : Operand<iPTR>
{
  let EncoderMethod = "encodeBranchTargetOpValue";
  let DecoderMethod = "decodeBranchTargetOpValue";
}

//===----------------------------------------------------------------------===//
// Pattern fragments
//===----------------------------------------------------------------------===//
// Definition of anyextload used in the loading of vector types < 512 bits:
// matches any unindexed load that performs some extension (not NON_EXTLOAD).
def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), 
    [{ return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;}]>; 

//----------------------------------------------------------------------------// 
//------------------------------ LOAD AND STORE ------------------------------// 
//----------------------------------------------------------------------------// 
// Address space 77 identifies the scratchpad memory; everything else is
// treated as main memory. The Mem*/Scratchpad* fragments below split plain
// (non-truncating, non-extending) loads/stores accordingly.

// Non-truncating store to main memory (any address space except 77).
def MemStore : PatFrag<(ops node:$val, node:$ptr), (store node:$val, node:$ptr), [{
                            if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
                              return !cast<StoreSDNode>(N)->isTruncatingStore();
                            else
                              return false;}]>;

// Non-extending load from main memory.
def MemLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
          else
            return false;}]>;

// Non-truncating store to scratchpad memory (address space 77).
def ScratchpadStore : PatFrag<(ops node:$val, node:$ptr), (store node:$val, node:$ptr), [{
                            if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
                              return !cast<StoreSDNode>(N)->isTruncatingStore();
                            else
                              return false;}]>;

// Non-extending load from scratchpad memory.
def ScratchpadLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
          else
            return false;}]>;

//---------------- EXTLOAD scalar ----------------// 
// Any-extending scalar loads, split by memory value type (i1/i8/i16/i32) and
// by destination memory: _mem = main memory (address space != 77),
// _scratch = scratchpad (address space == 77).
def extloadi1_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;
def extloadi1_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;

def extloadi8_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;
def extloadi8_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;

def extloadi16_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;
def extloadi16_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;

def extloadi32_mem : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;
def extloadi32_scratch : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;

//---------------- ZEXTLOAD scalar ----------------// 
// Zero-extending scalar loads, split by memory value type and by
// main-memory (_mem, address space != 77) vs scratchpad (_scratch, == 77).
def zextloadi1_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;
def zextloadi1_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;

def zextloadi8_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;
def zextloadi8_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;

def zextloadi16_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;
def zextloadi16_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;

def zextloadi32_mem : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;
def zextloadi32_scratch : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;

//---------------- ZEXTLOAD vector ----------------// 
// Zero-extending vector loads (element types narrower than the register
// lane), split by memory value type and by main-memory vs scratchpad.
def zextloadv16i8_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
      else
        return false; }]>;
def zextloadv16i8_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
       else
         return false; }]>;

def zextloadv16i16_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
      else
        return false; }]>;
def zextloadv16i16_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
       else
         return false; }]>;

def zextloadv8i8_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
      else
        return false; }]>;
def zextloadv8i8_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
       else
         return false; }]>;

def zextloadv8i16_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
      else
        return false; }]>;
def zextloadv8i16_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
       else
         return false; }]>;

def zextloadv8i32_mem: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
      else
        return false; }]>;
def zextloadv8i32_scratch: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
       else
         return false; }]>;                  


//---------------- SEXTLOAD scalar ----------------// 
// Sign-extending scalar loads, split by memory value type and by
// main-memory (_mem, address space != 77) vs scratchpad (_scratch, == 77).
def sextloadi1_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;
def sextloadi1_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;

def sextloadi8_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;
def sextloadi8_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;

def sextloadi16_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;
def sextloadi16_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;

def sextloadi32_mem : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;
def sextloadi32_scratch : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
          if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
            return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;
			
//---------------- SEXTLOAD vector ----------------// 
// Sign-extending vector loads, split by memory value type and by
// main-memory vs scratchpad.
def sextloadv16i8_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
      else
        return false; }]>;
def sextloadv16i8_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
       else
         return false; }]>;

def sextloadv16i16_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
      else
        return false; }]>;
def sextloadv16i16_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
       else
         return false; }]>;

def sextloadv8i8_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
      else
        return false; }]>;
def sextloadv8i8_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
       else
         return false; }]>;

def sextloadv8i16_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
      else
        return false; }]>;
def sextloadv8i16_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
       else
         return false; }]>;

def sextloadv8i32_mem: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
      else
        return false; }]>;
def sextloadv8i32_scratch: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
       else
         return false; }]>;                                    

		 			
//---------------- ANYEXTLOAD vector ----------------// 
// Any-extending vector loads (built on the anyextload fragment above),
// split by memory value type and by main-memory vs scratchpad.
def anyextloadv16i8_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr), 
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
       else
         return false; }]>;
def anyextloadv16i8_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr), 
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i8;
       else
         return false; }]>;

def anyextloadv16i16_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
      else
        return false; }]>;
def anyextloadv16i16_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i16;
       else
         return false; }]>;

def anyextloadv8i8_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
      else
        return false; }]>;
def anyextloadv8i8_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i8;
       else
         return false; }]>;         

def anyextloadv8i16_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
      else
        return false; }]>;
def anyextloadv8i16_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i16;
       else
         return false; }]>;

def anyextloadv8i32_mem: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() != 77)
        return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
      else
        return false; }]>;
def anyextloadv8i32_scratch: PatFrag<(ops node:$ptr), (anyextload node:$ptr),
    [{ if(cast<LoadSDNode>(N)->getAddressSpace() == 77)
         return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
       else
         return false; }]>;

		 
//---------------- TRUNCSTORE scalar ----------------// 
// Truncating scalar stores, split by memory value type and by
// main-memory (_mem, address space != 77) vs scratchpad (_scratch, == 77).
def truncstorei1_mem : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;
def truncstorei1_scratch : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
          else
            return false;}]>;

def truncstorei8_mem : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;
def truncstorei8_scratch : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
          else
            return false;}]>;

def truncstorei16_mem : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;
def truncstorei16_scratch : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
          else
            return false;}]>;

def truncstorei32_mem : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() != 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;
def truncstorei32_scratch : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
          if(cast<StoreSDNode>(N)->getAddressSpace() == 77)
            return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
          else
            return false;}]>;

//---------------- TRUNCSTORE vector ----------------//
// Truncating vector stores, one _mem/_scratch pair per vector memory type.
// Address space 77 selects the scratchpad; any other value selects main
// memory. The predicate also pins the truncated vector memory type.
def truncstorev16i8_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() != 77 &&
              ST->getMemoryVT() == MVT::v16i8; }]>;
def truncstorev16i8_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() == 77 &&
              ST->getMemoryVT() == MVT::v16i8; }]>;

def truncstorev16i16_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() != 77 &&
              ST->getMemoryVT() == MVT::v16i16; }]>;
def truncstorev16i16_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() == 77 &&
              ST->getMemoryVT() == MVT::v16i16; }]>;

def truncstorev8i8_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() != 77 &&
              ST->getMemoryVT() == MVT::v8i8; }]>;
def truncstorev8i8_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() == 77 &&
              ST->getMemoryVT() == MVT::v8i8; }]>;

def truncstorev8i16_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() != 77 &&
              ST->getMemoryVT() == MVT::v8i16; }]>;
def truncstorev8i16_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() == 77 &&
              ST->getMemoryVT() == MVT::v8i16; }]>;

def truncstorev8i32_mem: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() != 77 &&
              ST->getMemoryVT() == MVT::v8i32; }]>;
def truncstorev8i32_scratch: PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr),
    [{ auto *ST = cast<StoreSDNode>(N);
       return ST->getAddressSpace() == 77 &&
              ST->getMemoryVT() == MVT::v8i32; }]>;

			
// insertelt SDNode redefinition
// Type profile: 1 result, 3 operands (vector, element, lane index).
// The result must have the same type as the input vector and the lane
// index must be pointer-sized.
def VecInsert : SDTypeProfile<1, 3, [    // vector insert
   SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
]>;

// Target-local node bound to the generic ISD::INSERT_VECTOR_ELT opcode.
def insert_elt  : SDNode<"ISD::INSERT_VECTOR_ELT", VecInsert>;

//===----------------------------------------------------------------------===//
//  Describe NaplesPU Special Registers
//
//
//===----------------------------------------------------------------------===//

// Wrapper that records the 6-bit encoding of a NaplesPU special register.
class SpReg<bits<6> reg> {
  bits<6> Register = reg;
}

// Hardware mask register, encoded as special register 59.
def MaskReg : SpReg<59>;

//===----------------------------------------------------------------------===//
//  Describe NaplesPU scalar or vector instructions
//
//  Fmt   - 0 if a register is scalar, 1 if vector
//===----------------------------------------------------------------------===//

// One-bit operand-format flag: 0 means a scalar register operand,
// 1 means a vector register operand (see the fmt bits in FR/FI below).
class Fmt<bit val> {
  bit Value = val;
}

def Fmt_S : Fmt<0>; // scalar operand
def Fmt_V : Fmt<1>; // vector operand

//===----------------------------------------------------------------------===//
// Describe NaplesPU instructions format here
//===----------------------------------------------------------------------===//

// Base class for every NaplesPU instruction: a fixed-width 32-bit
// encoding (Size is in bytes). Format subclasses below fill the bit
// ranges of Inst.
class InstNaplesPU<dag outs, dag ins, string asmstr, list<dag> pattern>
          : Instruction {
  field bits<32> Inst;

  let Namespace = "NaplesPU";
  let Size = 4;

  dag OutOperandList = outs;
  dag InOperandList = ins;
  let AsmString   = asmstr;
  let Pattern = pattern;

  //let DecoderNamespace = "NaplesPU";
  // Bits the disassembler may tolerate differing; none here.
  field bits<32> SoftFail = 0;
}


//===----------------------------------------------------------------------===//
// Format R instruction class in NaplesPU : <00|opcode|rd|rs0|rs1|unused|l|fmt2|fmt1|fmt0|m|>
// l: if 1, 64 bit mode
// fmt2: FMT value for rd register
// fmt1: FMT value for rs0 register
// fmt0: FMT value for rs1 register
// m: if 1, masked
//
//===----------------------------------------------------------------------===//

// Register-format (R) instruction. Encoding, MSB to LSB:
//   31-30: 00 (R-format tag)     29-24: opcode
//   23-18: dst                   17-12: src0
//   11-6 : src1 (filled by FR_TwoOp, zeroed by FR_OneOp)
//   5    : unused                4    : l (1 = 64-bit mode)
//   3    : fmt2 (dst S/V)        2    : fmt1 (src0 S/V)
//   1    : fmt0 (src1 S/V)       0    : m (1 = masked)
class FR<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0, bit m>
   : InstNaplesPU<outs, ins, asmstr, pattern> {
  bits <6> dst;
  bits <6> src0;

  let Inst{31-30} = 0;
  let Inst{29-24} = opcode;
  let Inst{23-18} = dst;
  let Inst{17-12} = src0;
  let Inst{5} = 0; //unused
  let Inst{4} = l;
  let Inst{3} = fmt2.Value;
  let Inst{2} = fmt1.Value;
  let Inst{1} = fmt0.Value;
  let Inst{0} = m;
}

// FR with two register sources: encodes the second source in bits 11-6.
class FR_TwoOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0, bit m>
   : FR<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, m> {
  bits <6> src1;

  let Inst{11-6} = src1;
}

// FR with a single register source: the src1 field (bits 11-6) is zeroed.
class FR_OneOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0, bit m>
   : FR<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, m> {

  let Inst{11-6} = 0;
}

// Thin wrappers that pin the m (mask) bit and, further down, the l
// (64-bit) bit of FR. Masked variants implicitly read the mask register
// MR_REG (declared in the register description file).

// Two-operand, masked (m = 1).
class FR_TwoOp_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0>
   : FR_TwoOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, 1> {
  let Uses = [MR_REG];
}

// Two-operand, unmasked (m = 0).
class FR_TwoOp_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1, Fmt fmt0>
   : FR_TwoOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, fmt0, 0> {

}

// One-operand, masked; the unused src1 format is forced to scalar.
class FR_OneOp_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1>
   : FR_OneOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, Fmt_S, 1> {
  let Uses = [MR_REG];
}

// One-operand, unmasked; the unused src1 format is forced to scalar.
class FR_OneOp_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, Fmt fmt2, Fmt fmt1>
   : FR_OneOp<outs, ins, asmstr, pattern, opcode, l, fmt2, fmt1, Fmt_S, 0> {

}

// The _32/_64 variants below pin the l bit (0 = 32-bit, 1 = 64-bit mode).
class FR_TwoOp_Masked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
   : FR_TwoOp_Masked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1, fmt0> {

}

class FR_TwoOp_Masked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
   : FR_TwoOp_Masked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1, fmt0> {

}

class FR_TwoOp_Unmasked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
   : FR_TwoOp_Unmasked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1, fmt0> {

}

class FR_TwoOp_Unmasked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1, Fmt fmt0>
   : FR_TwoOp_Unmasked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1, fmt0> {

}

class FR_OneOp_Masked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
   : FR_OneOp_Masked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1> {

}

class FR_OneOp_Masked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
   : FR_OneOp_Masked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1> {

}

class FR_OneOp_Unmasked_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
   : FR_OneOp_Unmasked<outs, ins, asmstr, pattern, opcode, 0, fmt2, fmt1> {

}

class FR_OneOp_Unmasked_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, Fmt fmt2, Fmt fmt1>
   : FR_OneOp_Unmasked<outs, ins, asmstr, pattern, opcode, 1, fmt2, fmt1> {

}

//===----------------------------------------------------------------------===//
// Format I instruction class in NaplesPU : <010|opcode|rd|rs|imm|fmt|m|>
// fmt1: FMT value for rd register
// fmt0: FMT value for rs register
// m: if 1 masked
//===----------------------------------------------------------------------===//

// Immediate-format (I) instruction: <010|opcode(5)|rd|rs|imm(9)|fmt1|fmt0|m>.
//   31-29: 010    28-24: opcode    23-18: dst
//   17-12: src (filled by FI_OneOp, zeroed by FI_NoOp)
//   11-3 : 9-bit immediate
//   2: fmt1 (dst S/V)   1: fmt0 (src S/V)   0: m (1 = masked)
class FI<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0, bit m>
    : InstNaplesPU<outs, ins, asmstr, pattern> {
  bits <6> dst;
  bits <9> imm;

  let Inst{31-29} = 0b010;
  let Inst{28-24} = opcode;
  let Inst{23-18} = dst;
  let Inst{11-3} = imm;
  let Inst{2} = fmt1.Value;
  let Inst{1} = fmt0.Value;
  let Inst{0} = m;
}

// FI with one register source, encoded in bits 17-12.
class FI_OneOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0, bit m> : FI<outs, ins, asmstr, pattern, opcode, fmt1, fmt0, m> {
  bits <6> src;

  let Inst{17-12} = src;
}

// FI with no register source: bits 17-12 zeroed, src format forced
// scalar, always unmasked.
class FI_NoOp<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1> : FI<outs, ins, asmstr, pattern, opcode, fmt1, Fmt_S, 0> {
  let Inst{17-12} = 0;
}

// Masked FI (m = 1); implicitly reads the mask register MR_REG.
class FI_OneOp_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0> : FI_OneOp<outs, ins, asmstr, pattern, opcode, fmt1, fmt0, 1> {
  let Uses = [MR_REG];
}

// Unmasked FI (m = 0).
class FI_OneOp_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<5> opcode, Fmt fmt1, Fmt fmt0> : FI_OneOp<outs, ins, asmstr, pattern, opcode, fmt1, fmt0, 0> {

}

//===----------------------------------------------------------------------===//
// Format MOVEI instruction class in NaplesPU : <01100|opcode|rd|imm|fmt|m|>
//===----------------------------------------------------------------------===//

// Move-immediate format: <01100|opcode(3)|rd|imm(16)|fmt|m>.
// The 16-bit immediate occupies bits 17-2; fmt flags dst as scalar or
// vector and m selects masked execution.
class FMOVEI<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode, Fmt fmt, bit m>
      : InstNaplesPU<outs, ins, asmstr, pattern> {
  bits <6> dst;
  bits <16> imm;

  let Inst{31-27} = 0b01100;
  let Inst{26-24} = opcode;
  let Inst{23-18} = dst;
  let Inst{17-2} = imm;
  let Inst{1} = fmt.Value;
  let Inst{0} = m;
}

//===----------------------------------------------------------------------===//
// Format M instruction class in NaplesPU : <10|opcode|rd/rs|rptr|off|l|s|m|>
//===----------------------------------------------------------------------===//

// Memory-format (M) instruction: <10|opcode|rd/rs|rptr|off|l|s|m>.
// The 15-bit addr operand packs base register and offset in one value
// (see encodeMemoryOpValue): addr{5-0} (base) goes to Inst{17-12} and
// addr{14-6} (offset) to Inst{11-3}.
//   l: 1 = 64-bit access   s: 1 = scratchpad, 0 = main memory   m: masked
class FM<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, bit s, bit m>
    : InstNaplesPU<outs, ins, asmstr, pattern> {
  bits <6> dstsrc;
  bits <15> addr; //base address and offset encoded on the same 15 bits value (check encodeMemoryOpValue)

  let Inst{31-30} = 0b10;
  let Inst{29-24} = opcode;
  let Inst{23-18} = dstsrc;
  let Inst{17-12} = addr{5-0};
  let Inst{11-3} = addr{14-6};
  let Inst{2} = l;
  let Inst{1} = s;
  let Inst{0} = m;
}

// Thin wrappers pinning FM's m (mask), s (scratchpad) and l (64-bit)
// bits. Masked variants implicitly read the mask register MR_REG.

class FM_Unmasked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, bit s>
   : FM<outs, ins, asmstr, pattern, opcode, l, s, 0> {
}

class FM_Masked<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l, bit s>
   : FM<outs, ins, asmstr, pattern, opcode, l, s, 1> {
  let Uses = [MR_REG];
}

// s = 0: main memory.
class FM_Unmasked_Mainmem<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
   : FM_Unmasked<outs, ins, asmstr, pattern, opcode, l, 0> {
}

// s = 1: scratchpad memory.
class FM_Unmasked_Scratchpad<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
   : FM_Unmasked<outs, ins, asmstr, pattern, opcode, l, 1> {
}

class FM_Masked_Mainmem<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
   : FM_Masked<outs, ins, asmstr, pattern, opcode, l, 0> {
}

class FM_Masked_Scratchpad<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode, bit l>
   : FM_Masked<outs, ins, asmstr, pattern, opcode, l, 1> {
}

// _32 variants: l = 0 (32-bit access).
class FM_Unmasked_Mainmem_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Unmasked_Mainmem<outs, ins, asmstr, pattern, opcode, 0> {
}

class FM_Unmasked_Scratchpad_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Unmasked_Scratchpad<outs, ins, asmstr, pattern, opcode, 0> {
}

class FM_Masked_Mainmem_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Masked_Mainmem<outs, ins, asmstr, pattern, opcode, 0> {
}

class FM_Masked_Scratchpad_32<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Masked_Scratchpad<outs, ins, asmstr, pattern, opcode, 0> {
}

// _64 variants: l = 1 (64-bit access).
class FM_Unmasked_Mainmem_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Unmasked_Mainmem<outs, ins, asmstr, pattern, opcode, 1> {
}

class FM_Unmasked_Scratchpad_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Unmasked_Scratchpad<outs, ins, asmstr, pattern, opcode, 1> {
}

class FM_Masked_Mainmem_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Masked_Mainmem<outs, ins, asmstr, pattern, opcode, 1> {
}

class FM_Masked_Scratchpad_64<dag outs, dag ins, string asmstr, list<dag> pattern, bits<6> opcode>
   : FM_Masked_Scratchpad<outs, ins, asmstr, pattern, opcode, 1> {
}


//===----------------------------------------------------------------------===//
// Format J/BR instruction class in NaplesPU 
// FJR: <0111|type(0/1)|opcode|rd|imm|>
// FJ:  <0111|type(0/1)|opcode|imm|>
//===----------------------------------------------------------------------===//

// Common base for jump/branch formats: prefix 0111 and a 3-bit opcode.
// Bit 27 (the type bit) is set by the subclasses below.
class FJ_ALL<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
    : InstNaplesPU<outs, ins, asmstr, pattern> {

  let Inst{31-28} = 0b0111;
  let Inst{26-24} = opcode;
  let isBranch = 1;
}

// Conditional branch (type bit = 0): condition register in bits 23-18,
// 18-bit target in bits 17-0.
class FJR<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
    : FJ_ALL<outs, ins, asmstr, pattern, opcode> {
  bits <6> cond;
  bits <18> addr;

  let Inst{27} = 0;
  let Inst{23-18} = cond;
  let Inst{17-0} = addr;
}

// Unconditional jump (type bit = 1): 24-bit target in bits 23-0.
class FJ<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
    : FJ_ALL<outs, ins, asmstr, pattern, opcode> {
  bits <24> addr;

  let Inst{27} = 1;
  let Inst{23-0} = addr;
}

//===----------------------------------------------------------------------===//
// Format C instruction class in NaplesPU 
// FC:  <01101|opcode|rs0|rs1|unused|>
//===----------------------------------------------------------------------===//

// Control-format (C) instruction: <01101|opcode(3)|rs0|rs1|unused(12)>.
// Two source registers, no destination, low 12 bits zeroed.
class FC<dag outs, dag ins, string asmstr, list<dag> pattern, bits<3> opcode>
    : InstNaplesPU<outs, ins, asmstr, pattern> {

  bits <6> src0;
  bits <6> src1;

  let Inst{31-27} = 0b01101;
  let Inst{26-24} = opcode;
  let Inst{23-18} = src0;
  let Inst{17-12} = src1;
  let Inst{11-0} = 0; //unused
}

//===----------------------------------------------------------------------===//
//  A set of multiclasses is used to handle Vector/Scalar combinations
//   SS: Scalar = Op Scalar
//   VV: Vector = Op Vector
//   SI: Scalar = Op Immediate
//  SSS: Scalar = Scalar Op Scalar
//  VVS: Vector = Vector Op Scalar
//  VVV: Vector = Vector Op Vector
//  SVV: Scalar = Vector Op Vector
//  SSI: Scalar = Scalar Op Immediate
//  VVI: Vector = Vector Op Immediate
//===----------------------------------------------------------------------===//

// Two-operand integer arithmetic: expands to scalar, vector-scalar
// (scalar splatted across lanes), vector-vector and immediate forms, in
// masked and unmasked variants. Masked variants merge the new result
// with $oldvalue via int_npu_vector_mixi32 (inactive lanes keep the old
// value), hence the two-address constraint $dst = $oldvalue.
// NOTE(review): opcode{4-0} truncates the 6-bit FR opcode to FI's 5 bits
// — assumes the two opcode spaces are aligned; confirm against the ISA.
multiclass FArithInt_TwoOp<string operator, SDNode OpNode, bits<6> opcode> {
  // FR - SSS - 32 bit integer
  def SSS_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0, GPR32:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set i32:$dst, (OpNode i32:$src0, i32:$src1))],
    opcode,
    Fmt_S,
    Fmt_S,
    Fmt_S>;

  // FR - VVS unmasked - 32 bit integer
  def VVS_U_32 : FR_TwoOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, GPR32:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set v16i32:$dst, (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_S>;

  // FR - VVV unmasked - 32 bit integer
  def VVV_U_32 : FR_TwoOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, VR512W:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set v16i32:$dst, (OpNode v16i32:$src0, v16i32:$src1))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_V>;

  let Constraints = "$dst = $oldvalue" in {
    // FR - VVS masked - 32 bit integer
    def VVS_M_32 : FR_TwoOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, GPR32:$src1, VR512W:$oldvalue),
      operator # "_i32.m $dst, $src0, $src1",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))),
        v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V,
      Fmt_S>;

    // FR - VVV masked - 32 bit integer
    def VVV_M_32 : FR_TwoOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, VR512W:$src1, VR512W:$oldvalue),
      operator # "_i32.m $dst, $src0, $src1",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, v16i32:$src1),
        v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V,
      Fmt_V>;

  }
  // FI - SSI
  def SSI : FI_OneOp_Unmasked<
    (outs GPR32:$dst),
    (ins GPR32:$src, SIMM9OP:$imm),
    operator # "i $dst, $src, $imm",
    [(set i32:$dst, (OpNode i32:$src, (i32 simm9:$imm)))],
    opcode{4-0},
    Fmt_S,
    Fmt_S>;

  // FI - VVI unmasked
  def VVI_U : FI_OneOp_Unmasked<
    (outs VR512W:$dst),
    (ins VR512W:$src, SIMM9OP:$imm),
    operator # "i $dst, $src, $imm",
    [(set v16i32:$dst, (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))))],
    opcode{4-0},
    Fmt_V,
    Fmt_V>;

  // FI - VVI masked
  let Constraints = "$dst = $oldvalue" in {
    def VVI_M : FI_OneOp_Masked<
    (outs VR512W:$dst),
    (ins VR512W:$src, SIMM9OP:$imm, VR512W:$oldvalue),
    operator # "i.m $dst, $src, $imm",
    [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))), v16i32:$oldvalue))],
    opcode{4-0},
    Fmt_V,
    Fmt_V>;
  }
}

// One-operand (unary) integer arithmetic: scalar, unmasked vector and
// masked vector forms. The masked form merges with $oldvalue through
// int_npu_vector_mixi32, so inactive lanes are preserved.
multiclass FArithInt_OneOp<string operator, SDNode OpNode, bits<6> opcode> {
  // FR - SS - 32 bit integer
  def SS_32 : FR_OneOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0),
    operator # "_i32 $dst, $src0",
    [(set i32:$dst, (OpNode i32:$src0))],
    opcode,
    Fmt_S,
    Fmt_S>;

  // FR - VV unmasked - 32 bit integer
  def VV_U_32 : FR_OneOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0),
    operator # "_i32 $dst, $src0",
    [(set v16i32:$dst, (OpNode v16i32:$src0))],
    opcode,
    Fmt_V,
    Fmt_V>;

  let Constraints = "$dst = $oldvalue" in {
    // FR - VV masked - 32 bit integer
    def VV_M_32 : FR_OneOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, VR512W:$oldvalue),
      operator # "_i32.m $dst, $src0",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0),
        v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V>;

  }
}

// For the shift and rotate operations
// Two-operand shift/rotate expansion. NOTE(review): this multiclass is a
// verbatim duplicate of FArithInt_TwoOp and the two are candidates for
// merging; kept separate here, presumably for the shift/rotate opcodes.
multiclass FSRInt_TwoOp<string operator, SDNode OpNode, bits<6> opcode> {
  // FR - SSS - 32 bit integer
  def SSS_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0, GPR32:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set i32:$dst, (OpNode i32:$src0, i32:$src1))],
    opcode,
    Fmt_S,
    Fmt_S,
    Fmt_S>;

  // FR - VVS unmasked - 32 bit integer
  def VVS_U_32 : FR_TwoOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, GPR32:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set v16i32:$dst, (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_S>;

  // FR - VVV unmasked - 32 bit integer
  def VVV_U_32 : FR_TwoOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, VR512W:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set v16i32:$dst, (OpNode v16i32:$src0, v16i32:$src1))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_V>;

  // Masked forms merge with $oldvalue (inactive lanes preserved).
  let Constraints = "$dst = $oldvalue" in {
    // FR - VVS masked - 32 bit integer
    def VVS_M_32 : FR_TwoOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, GPR32:$src1, VR512W:$oldvalue),
      operator # "_i32.m $dst, $src0, $src1",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, (v16i32 (splat i32:$src1))),
        v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V,
      Fmt_S>;

    // FR - VVV masked - 32 bit integer
    def VVV_M_32 : FR_TwoOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, VR512W:$src1, VR512W:$oldvalue),
      operator # "_i32.m $dst, $src0, $src1",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src0, v16i32:$src1),
        v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V,
      Fmt_V>;

  }
  // FI - SSI
  def SSI : FI_OneOp_Unmasked<
    (outs GPR32:$dst),
    (ins GPR32:$src, SIMM9OP:$imm),
    operator # "i $dst, $src, $imm",
    [(set i32:$dst, (OpNode i32:$src, (i32 simm9:$imm)))],
    opcode{4-0},
    Fmt_S,
    Fmt_S>;

  // FI - VVI unmasked
  def VVI_U : FI_OneOp_Unmasked<
    (outs VR512W:$dst),
    (ins VR512W:$src, SIMM9OP:$imm),
    operator # "i $dst, $src, $imm",
    [(set v16i32:$dst, (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))))],
    opcode{4-0},
    Fmt_V,
    Fmt_V>;

  // FI - VVI masked
  let Constraints = "$dst = $oldvalue" in {
    def VVI_M : FI_OneOp_Masked<
    (outs VR512W:$dst),
    (ins VR512W:$src, SIMM9OP:$imm, VR512W:$oldvalue),
    operator # "i.m $dst, $src, $imm",
    [(set v16i32:$dst, (int_npu_vector_mixi32 (OpNode v16i32:$src, (v16i32 (splat simm9:$imm))), v16i32:$oldvalue))],
    opcode{4-0},
    Fmt_V,
    Fmt_V>;
  }
}

// Two-operand 32-bit float arithmetic: scalar, vector-scalar (splat) and
// vector-vector forms; masked variants merge with $oldvalue via
// int_npu_vector_mixf32. No immediate forms (FI carries integer
// immediates only).
multiclass FArithFloat_TwoOp<string operator, SDNode OpNode, bits<6> opcode> {
  // FR - SSS - 32 bit float
  def SSS_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0, GPR32:$src1),
    operator # "_f32 $dst, $src0, $src1",
    [(set f32:$dst, (OpNode f32:$src0, f32:$src1))],
    opcode,
    Fmt_S,
    Fmt_S,
    Fmt_S>;

  // FR - VVS unmasked - 32 bit float
  def VVS_U_32 : FR_TwoOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, GPR32:$src1),
    operator # "_f32 $dst, $src0, $src1",
    [(set v16f32:$dst, (OpNode v16f32:$src0, (splat f32:$src1)))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_S>;

  // FR - VVV unmasked - 32 bit float
  def VVV_U_32 : FR_TwoOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, VR512W:$src1),
    operator # "_f32 $dst, $src0, $src1",
    [(set v16f32:$dst, (OpNode v16f32:$src0, v16f32:$src1))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_V>;

  // Masked forms: $dst tied to $oldvalue so inactive lanes are preserved.
  let Constraints = "$dst = $oldvalue" in {
    // FR - VVS masked - 32 bit float
    def VVS_M_32 : FR_TwoOp_Masked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, GPR32:$src1, VR512W:$oldvalue),
    operator # "_f32.m $dst, $src0, $src1",
    [(set v16f32:$dst, (int_npu_vector_mixf32 (OpNode v16f32:$src0, (splat f32:$src1)),
        v16f32:$oldvalue))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_S>;

    // FR - VVV masked - 32 bit float
    def VVV_M_32 : FR_TwoOp_Masked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0, VR512W:$src1, VR512W:$oldvalue),
    operator # "_f32.m $dst, $src0, $src1",
    [(set v16f32:$dst, (int_npu_vector_mixf32 (OpNode v16f32:$src0, v16f32:$src1),
        v16f32:$oldvalue))],
    opcode,
    Fmt_V,
    Fmt_V,
    Fmt_V>;
}
}

// One-operand (unary) 32-bit float arithmetic: scalar, unmasked vector
// and masked vector forms, mirroring FArithFloat_TwoOp.
multiclass FArithFloat_OneOp<string operator, SDNode OpNode, bits<6> opcode> {
  // FR - SS - 32 bit float
  def SS_32 : FR_OneOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0),
    operator # "_f32 $dst, $src0",
    [(set f32:$dst, (OpNode f32:$src0))],
    opcode,
    Fmt_S,
    Fmt_S>;

  // FR - VV unmasked - 32 bit float
  def VV_U_32 : FR_OneOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0),
    operator # "_f32 $dst, $src0",
    [(set v16f32:$dst, (OpNode v16f32:$src0))],
    opcode,
    Fmt_V,
    Fmt_V>;

  // Masked form: $dst tied to $oldvalue so inactive lanes are preserved.
  let Constraints = "$dst = $oldvalue" in {
    // FR - VV masked - 32 bit float
    // Fixed: the masked float pattern previously used the integer mix
    // intrinsic (int_npu_vector_mixi32) with v16i32 operands inside a
    // v16f32 set, inconsistent with FArithFloat_TwoOp; it now uses
    // int_npu_vector_mixf32 with v16f32 operands.
    def VV_M_32 : FR_OneOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, VR512W:$oldvalue),
      operator # "_f32.m $dst, $src0",
      [(set v16f32:$dst, (int_npu_vector_mixf32 (OpNode v16f32:$src0),
        v16f32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V>;
  }
}

// Condition codes defined in include/llvm/CodeGen/ISDOpcodes.h
// VS and VV comparisons are handled through intrinsics
// Condition codes defined in include/llvm/CodeGen/ISDOpcodes.h
// VS and VV comparisons are handled through intrinsics
// Integer comparisons. Scalar forms use the generic setcc with the given
// CondCode; vector forms go through vectorIntr32 and, per the Fmt_S
// result flag, produce a scalar i32 in a GPR (presumably a per-lane
// bitmask — confirm against the intrinsic definition).
multiclass FCompInt<string operator, CondCode condition,
  bits<6> opcode, Intrinsic vectorIntr32> {
  // FR - SSS - 32 bit integer
  def SSS_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0, GPR32:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set i32:$dst, (setcc i32:$src0, i32:$src1, condition))],
    opcode,
    Fmt_S,
    Fmt_S,
    Fmt_S>;

  // FR - SVS unmasked - 32 bit integer
  def SVS_U_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins VR512W:$src0, GPR32:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set i32:$dst, (vectorIntr32 v16i32:$src0, (splat i32:$src1)))],
    opcode,
    Fmt_S,
    Fmt_V,
    Fmt_S>;

  // FR - SVV unmasked - 32 bit integer
  def SVV_U_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins VR512W:$src0, VR512W:$src1),
    operator # "_i32 $dst, $src0, $src1",
    [(set i32:$dst, (vectorIntr32 v16i32:$src0, v16i32:$src1))],
    opcode,
    Fmt_S,
    Fmt_V,
    Fmt_V>;

  // FI - SSI
  def SSI : FI_OneOp_Unmasked<
    (outs GPR32:$dst),
    (ins GPR32:$src, SIMM9OP:$imm),
    operator # "i $dst, $src, $imm",
    [(set i32:$dst, (setcc i32:$src, simm9:$imm, condition))],
    opcode{4-0},
    Fmt_S,
    Fmt_S>;

  // FI - SVI unmasked
  def SVI : FI_OneOp_Unmasked<
    (outs GPR32:$dst),
    (ins VR512W:$src, SIMM9OP:$imm),
    operator # "i $dst, $src, $imm",
    [(set i32:$dst, (vectorIntr32 v16i32:$src, (splat simm9:$imm)))],
    opcode{4-0},
    Fmt_S,
    Fmt_V>;
}

// Float comparisons: scalar form matches the given SDNode (an f32
// comparison producing an i32), vector forms go through vectorIntr32 and
// produce a scalar i32 result (Fmt_S). No immediate forms for floats.
multiclass FCompFloat<string operator, SDNode OpNode,
  bits<6> opcode, Intrinsic vectorIntr32> {
  // FR - SSS - 32 bit float
  def SSS_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0, GPR32:$src1),
    operator # "_f32 $dst, $src0, $src1",
    [(set i32:$dst, (OpNode f32:$src0, f32:$src1))],
    opcode,
    Fmt_S,
    Fmt_S,
    Fmt_S>;

  // FR - SVS unmasked - 32 bit float
  def SVS_U_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins VR512W:$src0, GPR32:$src1),
    operator # "_f32 $dst, $src0, $src1",
    [(set i32:$dst, (vectorIntr32 v16f32:$src0, (splat f32:$src1)))],
    opcode,
    Fmt_S,
    Fmt_V,
    Fmt_S>;

  // FR - SVV unmasked - 32 bit float
  def SVV_U_32 : FR_TwoOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins VR512W:$src0, VR512W:$src1),
    operator # "_f32 $dst, $src0, $src1",
    [(set i32:$dst, (vectorIntr32 v16f32:$src0, v16f32:$src1))],
    opcode,
    Fmt_S,
    Fmt_V,
    Fmt_V>;

}

// Sign-extension: scalar form matches sext_inreg (extend from the
// sub-type vt held in an i32 register), vector forms match a real sext
// from the narrow vector type vt_v to v16i32. Masked form merges with
// $oldvalue through int_npu_vector_mixi32.
multiclass FSext_32<string operator, ValueType vt,
  bits<6> opcode, ValueType vt_v> {
  // FR - SS
  def SS : FR_OneOp_Unmasked_32<
    (outs GPR32:$dst),
    (ins GPR32:$src0),
    operator # "_i32 $dst, $src0",
    [(set i32:$dst, (sext_inreg i32:$src0, vt))],
    opcode,
    Fmt_S,
    Fmt_S>;

  // FR - VV unmasked
  def VV_U : FR_OneOp_Unmasked_32<
    (outs VR512W:$dst),
    (ins VR512W:$src0),
    operator # "_i32 $dst, $src0",
    [(set v16i32:$dst, (sext vt_v:$src0))],
    opcode,
    Fmt_V,
    Fmt_V>;

  let Constraints = "$dst = $oldvalue" in {
    // FR - VV masked
    def VV_M : FR_OneOp_Masked_32<
      (outs VR512W:$dst),
      (ins VR512W:$src0, VR512W:$oldvalue),
      operator # "_i32.m $dst, $src0",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (sext vt_v:$src0),
        v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      Fmt_V>;
  }
}

//===----------------------------------------------------------------------===//
//  A set of multiclasses used to handle Loads and Stores
//===----------------------------------------------------------------------===//

// Scalar LOAD
// Scalar LOAD
// Scalar 32-bit load pair: one instruction for main memory (op_mem
// PatFrag, address space != 77) and one for the scratchpad (op_scratch,
// address space == 77). Both are unmasked.
multiclass FMLoadScalar_32<string suffix, PatFrag op_mem, PatFrag op_scratch, bits<6> opcode> {

  def _Mainmem : FM_Unmasked_Mainmem<
    (outs GPR32:$dstsrc), 
    (ins MEMri:$addr), 
    "load32" # suffix # " $dstsrc, $addr", 
    [(set i32:$dstsrc, (i32 (op_mem ADDRri:$addr)))], 
    opcode, 
    0>;

  def _Scratchpad : FM_Unmasked_Scratchpad<
    (outs GPR32:$dstsrc),
    (ins MEMri:$addr),
    "load32" # suffix # "_scratchpad $dstsrc, $addr",
    [(set i32:$dstsrc, (i32 (op_scratch ADDRri:$addr)))],
    opcode,
    0>;

}

// Scalar STORE
// Scalar STORE
// Scalar 32-bit store pair, main memory vs scratchpad (selected by the
// address-space predicate inside op_mem/op_scratch).
// NOTE(review): only the scratchpad variant sets hasSideEffects/mayStore
// explicitly — presumably because the scratchpad PatFrag prevents the
// flags from being inferred; confirm whether _Mainmem needs them too.
multiclass FMStoreScalar_32<string suffix, PatFrag op_mem, PatFrag op_scratch, bits<6> opcode> {

  def _Mainmem : FM_Unmasked_Mainmem<
    (outs),
    (ins GPR32:$dstsrc, MEMri:$addr),
    "store32" # suffix # " $dstsrc, $addr",
    [(op_mem i32:$dstsrc, ADDRri:$addr)],
    opcode, 
    0>;

  def _Scratchpad : FM_Unmasked_Scratchpad<
    (outs),
    (ins GPR32:$dstsrc, MEMri:$addr),
    "store32" # suffix # "_scratchpad $dstsrc, $addr",
    [(op_scratch i32:$dstsrc, ADDRri:$addr)],
    opcode,
    0>{
        let hasSideEffects = 1;
        let mayStore = 1;
      }
}

// Vector LOAD
// Vector LOAD
// Vector 32-bit loads: four variants crossing {main memory, scratchpad}
// with {unmasked, masked}. Unmasked forms match PatFrags; masked forms
// match intrinsics (the mask is implicit via MR_REG in FM_Masked_*).
multiclass FMLoadVector_32<string suffix, PatFrag op_Umem, PatFrag op_Uscratch, 
                           Intrinsic op_Mmem, Intrinsic op_Mscratch, bits<6> opcode> {

  // main memory - unmasked - 32
  def Mainmem_U : FM_Unmasked_Mainmem_32<
    (outs VR512W:$dstsrc),
    (ins MEMri:$addr),
    "load" # suffix # " $dstsrc, $addr",
    [(set v16i32:$dstsrc, (op_Umem ADDRri:$addr))],
    opcode>;

  // scratchpad memory - unmasked - 32
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
    (outs VR512W:$dstsrc),
    (ins MEMri:$addr),
    "load" # suffix # "_scratchpad $dstsrc, $addr",
    [(set v16i32:$dstsrc, (op_Uscratch ADDRri:$addr))],
    opcode>;

  // main memory - masked - 32
  def Mainmem_M : FM_Masked_Mainmem_32<
    (outs VR512W:$dstsrc),
    (ins MEMri:$addr),
    "load" # suffix # ".m $dstsrc, $addr",
    [(set v16i32:$dstsrc, (op_Mmem ADDRri:$addr))],
    opcode>;

  // scratchpad memory - masked - 32
  def Scratchpad_M : FM_Masked_Scratchpad_32<
    (outs VR512W:$dstsrc),
    (ins MEMri:$addr),
    "load" # suffix # "_scratchpad.m $dstsrc, $addr",
    [(set v16i32:$dstsrc, (op_Mscratch ADDRri:$addr))],
    opcode>;
}

// Vector GATHER
// Vector GATHER
// Gather loads (per-lane addresses, V16MEMri/V16ADDRri operands); only
// scratchpad variants exist, unmasked and masked, both via intrinsics.
multiclass FMGather_32<string suffix, Intrinsic op_Uscratch, 
                           Intrinsic op_Mscratch, bits<6> opcode> {
  
  // scratchpad memory - unmasked - 32
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
    (outs VR512W:$dstsrc),
    (ins V16MEMri:$addr),
    "loadg" # suffix # "_scratchpad $dstsrc, $addr",
    [(set v16i32:$dstsrc, (op_Uscratch V16ADDRri:$addr))],
    opcode>;
  // scratchpad memory - masked - 32
  def Scratchpad_M : FM_Masked_Scratchpad_32<
    (outs VR512W:$dstsrc),
    (ins V16MEMri:$addr),
    "loadg" # suffix # "_scratchpad.m $dstsrc, $addr",
    [(set v16i32:$dstsrc, (op_Mscratch V16ADDRri:$addr))],
    opcode>;
}

// Vector STORE
// Vector STORE (16 x 32-bit lanes).
//
// Expands into four store definitions that share a single opcode:
//   - Mainmem_U / Scratchpad_U: unmasked stores via the PatFrags
//     op_Umem / op_Uscratch, matched with operand order (value, address).
//   - Mainmem_M / Scratchpad_M: masked stores via the intrinsics
//     op_Mmem / op_Mscratch, matched with operand order (address, value).
// NOTE(review): the swapped operand order between the unmasked and masked
// patterns presumably mirrors the PatFrag-vs-intrinsic signatures --
// confirm against the store PatFrag and intrinsic declarations.
multiclass FMStoreVector_32<string suffix, PatFrag op_Umem, PatFrag op_Uscratch, 
                           Intrinsic op_Mmem, Intrinsic op_Mscratch, bits<6> opcode> {

  // main memory - unmasked - 32
  def Mainmem_U : FM_Unmasked_Mainmem_32<
    (outs),
    (ins VR512W:$dstsrc, MEMri:$addr),
    "store" # suffix # " $dstsrc, $addr",
    [(op_Umem v16i32:$dstsrc, ADDRri:$addr)],
    opcode>;
  // scratchpad memory - unmasked - 32
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
    (outs),
    (ins VR512W:$dstsrc, MEMri:$addr),
    "store" # suffix # "_scratchpad $dstsrc, $addr",
    [(op_Uscratch v16i32:$dstsrc, ADDRri:$addr)],
    opcode>;
  // main memory - masked - 32
  def Mainmem_M : FM_Masked_Mainmem_32<
    (outs),
    (ins VR512W:$dstsrc, MEMri:$addr),
    "store" # suffix # ".m $dstsrc, $addr",
    [(op_Mmem ADDRri:$addr, v16i32:$dstsrc)],
    opcode>;
  // scratchpad memory - masked - 32
  def Scratchpad_M : FM_Masked_Scratchpad_32<
    (outs),
    (ins VR512W:$dstsrc, MEMri:$addr),
    "store" # suffix # "_scratchpad.m $dstsrc, $addr",
    [(op_Mscratch ADDRri:$addr, v16i32:$dstsrc)],
    opcode>;
}

// Vector SCATTER
// Vector SCATTER (16 x 32-bit lanes).
//
// Scatter is only defined for scratchpad memory (no main-memory variants
// appear here): an unmasked (op_Uscratch) and a masked (op_Mscratch)
// intrinsic form. $addr is a vector of per-lane addresses
// (V16MEMri / V16ADDRri); intrinsic operand order is (address, value).
// Mnemonic is "stores" # suffix.
multiclass FMScatter_32<string suffix, Intrinsic op_Uscratch, 
                           Intrinsic op_Mscratch, bits<6> opcode> {

  // scratchpad memory - unmasked - 32
  def Scratchpad_U : FM_Unmasked_Scratchpad_32<
    (outs),
    (ins VR512W:$dstsrc, V16MEMri:$addr),
    "stores" # suffix # "_scratchpad $dstsrc, $addr",
    [(op_Uscratch V16ADDRri:$addr, v16i32:$dstsrc)],
    opcode>;

  // scratchpad memory - masked - 32
  def Scratchpad_M : FM_Masked_Scratchpad_32<
    (outs),
    (ins VR512W:$dstsrc, V16MEMri:$addr),
    "stores" # suffix # "_scratchpad.m $dstsrc, $addr",
    [(op_Mscratch V16ADDRri:$addr, v16i32:$dstsrc)],
    opcode>;
}

//===----------------------------------------------------------------------===//
//  A set of multiclasses is used to handle Vector/Scalar 
//  Masked/Unmasked combinations
//  MOVEI operations
//===----------------------------------------------------------------------===//

// MOVEI (move-immediate) variants: scalar (SI), vector unmasked (VI_U),
// and vector masked (VI_M), all sharing one 3-bit opcode.
//
// The SI and VI_U selection patterns are present but commented out, so
// those forms are presumably selected elsewhere (e.g. custom lowering or
// C++ selection) -- TODO confirm.
//
// VI_M blends the splatted immediate with $oldvalue through
// int_npu_vector_mixi32 under the hardware mask register (Uses = [MR_REG]);
// the "$dst = $oldvalue" constraint ties the destination register to the
// previous value so unmasked lanes are preserved.
multiclass FMOVEI_ALL<string operator, bits<3> opcode> {
  // SI
  def SI : FMOVEI<
    (outs GPR32:$dst),
    (ins SIMM16OP:$imm),
    operator # " $dst, $imm",
    [],//[(set i32:$dst, simm16:$imm)],
    opcode,
    Fmt_S,
    0>;

  // VI unmasked
  def VI_U : FMOVEI<
    (outs VR512W:$dst),
    (ins SIMM16OP:$imm),
    operator # " $dst, $imm",
    [],//[(set v16i32:$dst, (splat simm16:$imm))],
    opcode,
    Fmt_V,
    0>;

 let Constraints = "$dst = $oldvalue", Uses = [MR_REG] in {
    // VI masked
    def VI_M : FMOVEI<
      (outs VR512W:$dst),
      (ins SIMM16OP:$imm, VR512W:$oldvalue),
      operator # ".m $dst, $imm",
      [(set v16i32:$dst, (int_npu_vector_mixi32 (splat simm16:$imm), v16i32:$oldvalue))],
      opcode,
      Fmt_V,
      1>;
  }
}



//===----------------------------------------------------------------------===//
// Instruction classes used to read/write special registers through intrinsics.
// All these instructions are implemented using a move instruction.
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Read_SPR" in {
  // Reads the special-purpose register 'reg' into a scalar GPR.
  //
  // Selected from the zero-operand intrinsic 'read_intr' and encoded as a
  // register move (opcode 32): the destination field is $dst, the first
  // source field carries the special-register index (reg.Register), and
  // the second source field is zeroed.
  class READ_SPR<SpReg reg, string operator, Intrinsic read_intr> :
                FR_OneOp_Unmasked_32<
                  (outs GPR32:$dst),
                  (ins),
                  operator # " $dst",
                  [(set i32:$dst, (read_intr))],
                  32,
                  Fmt_S,
                  Fmt_S>
  {
    bits<6> dst;

    let Inst{29-24} = 32; // opcode: move
    let Inst{23-18} = dst;          // destination GPR
    let Inst{17-12} = reg.Register; // source: special-register index
    let Inst{11-6} = 0;             // unused second source field
  }
}
let DecoderNamespace = "Write_SPR" in {
  // Writes the scalar GPR $src into the special-purpose register 'reg'.
  //
  // Encoded as a register move (opcode 32): the destination field carries
  // the special-register index (reg.Register), the first source field is
  // $src, and the second source field is zeroed.
  //
  // NOTE(review): the intrinsic parameter is named 'read_intr' although it
  // is matched as a write ([(read_intr i32:$src)]) -- presumably a
  // copy/paste from READ_SPR; consider renaming it to 'write_intr' once
  // all instantiation sites are checked.
  class WRITE_SPR<SpReg reg, string operator, Intrinsic read_intr> :
                FR_OneOp_Unmasked_32<
                  (outs),
                  (ins GPR32:$src),
                  operator # " $src",
                  [(read_intr i32:$src)],
                  32,
                  Fmt_S,
                  Fmt_S>
  {
    bits<6> src;

    let Inst{29-24} = 32; // opcode: move
    let Inst{23-18} = reg.Register; // destination: special-register index
    let Inst{17-12} = src;          // source GPR
    let Inst{11-6} = 0;             // unused second source field
  }
}


//===----------------------------------------------------------------------===//
//  Pseudo-instructions for alternate assembly syntax (never used by codegen).
//  These are aliases that require C++ handling to convert to the target
//  instruction, while InstAliases can be handled directly by tblgen.
//===----------------------------------------------------------------------===//
// Assembly-only pseudo-instruction: carries an asm string but no selection
// pattern, so codegen never produces it. Marked isPseudo; converting it to
// a real target instruction requires C++ handling in the asm-parsing path.
class AsmPseudoInst<dag outs, dag ins, string asm>
  : InstNaplesPU<outs, ins, asm, []> {
  let isPseudo = 1;
}

// Codegen-only pseudo-instruction: carries a selection pattern but a
// placeholder asm string ("Pseudo") and an all-zero encoding, so it must
// be expanded to real instructions before final emission.
class Pseudo<dag outs, dag ins, list<dag> pattern>
  : InstNaplesPU<outs, ins, "Pseudo", pattern>
{
  let isCodeGenOnly = 1; // never parsed or printed as assembly
  let isPseudo = 1;
  let Inst{31-0} = 0;    // no real encoding
}

// Atomic read-modify-write pseudo-instructions for a binary SDNode
// (e.g. atomic add/and/or): $dst receives a value derived from the memory
// at $ptr combined with $amt.
// Being Pseudo defs they carry no encoding; presumably they are expanded
// into a real instruction sequence later in the backend -- TODO confirm
// where the expansion happens.
multiclass AtomicBinary<SDNode OpNode>
{
  // Register-register form: amount comes from a GPR.
  def R : Pseudo<
    (outs GPR32:$dst),
    (ins GPR32:$ptr, GPR32:$amt),
    [(set i32:$dst, (OpNode GPR32:$ptr, GPR32:$amt))]>;

  // Register-immediate form: amount is a 9-bit signed immediate (simm9).
  def I : Pseudo<
    (outs GPR32:$dst),
    (ins GPR32:$ptr, SIMM9OP:$amt),
    [(set i32:$dst, (OpNode GPR32:$ptr, simm9:$amt))]>;
}