initial
thirdparty/clang/include/llvm/Target/CostTable.h (new vendored file, 64 lines)
@@ -0,0 +1,64 @@
//===-- CostTable.h - Instruction Cost Table handling -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Cost tables and simple lookup functions
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_COSTTABLE_H_
#define LLVM_TARGET_COSTTABLE_H_

namespace llvm {

/// Cost Table Entry
template <class TypeTy>
struct CostTblEntry {
  int ISD;
  TypeTy Type;
  unsigned Cost;
};

/// Find in cost table, TypeTy must be comparable by ==
template <class TypeTy>
int CostTableLookup(const CostTblEntry<TypeTy> *Tbl,
                    unsigned len, int ISD, TypeTy Ty) {
  for (unsigned int i = 0; i < len; ++i)
    if (Tbl[i].ISD == ISD && Tbl[i].Type == Ty)
      return i;

  // Could not find an entry.
  return -1;
}

/// Type Conversion Cost Table
template <class TypeTy>
struct TypeConversionCostTblEntry {
  int ISD;
  TypeTy Dst;
  TypeTy Src;
  unsigned Cost;
};

/// Find in type conversion cost table, TypeTy must be comparable by ==
template <class TypeTy>
int ConvertCostTableLookup(const TypeConversionCostTblEntry<TypeTy> *Tbl,
                           unsigned len, int ISD, TypeTy Dst, TypeTy Src) {
  for (unsigned int i = 0; i < len; ++i)
    if (Tbl[i].ISD == ISD && Tbl[i].Src == Src && Tbl[i].Dst == Dst)
      return i;

  // Could not find an entry.
  return -1;
}

} // namespace llvm


#endif /* LLVM_TARGET_COSTTABLE_H_ */
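A note on usage, since the header above only declares the tables: a lookup is a linear scan keyed on an ISD opcode and a type, returning an index or -1. The following is a minimal, hypothetical sketch; MY_ISD_MUL and MyTy are made-up stand-ins for whatever ISD opcodes and value types a real target would key on.

#include "llvm/Target/CostTable.h"

// Hypothetical opcode and type enums, for illustration only.
enum { MY_ISD_MUL = 1 };
enum MyTy { i32, i64, v4i32 };

static const llvm::CostTblEntry<MyTy> MulCostTbl[] = {
  { MY_ISD_MUL, i32,   1 },
  { MY_ISD_MUL, i64,   2 },
  { MY_ISD_MUL, v4i32, 4 }
};

unsigned getMulCost(MyTy Ty) {
  unsigned Len = sizeof(MulCostTbl) / sizeof(MulCostTbl[0]);
  int Idx = llvm::CostTableLookup(MulCostTbl, Len, MY_ISD_MUL, Ty);
  if (Idx < 0)
    return 1;                   // no entry: fall back to a default cost
  return MulCostTbl[Idx].Cost;  // entry found: use the table's cost
}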
thirdparty/clang/include/llvm/Target/Mangler.h (new vendored file, 72 lines)
@@ -0,0 +1,72 @@
//===-- llvm/Target/Mangler.h - Self-contained name mangler -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Unified name mangler for various backends.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_MANGLER_H
#define LLVM_TARGET_MANGLER_H

#include "llvm/ADT/DenseMap.h"

namespace llvm {
class Twine;
class GlobalValue;
template <typename T> class SmallVectorImpl;
class MCContext;
class MCSymbol;
class DataLayout;

class Mangler {
public:
  enum ManglerPrefixTy {
    Default,       ///< Emit default string before each symbol.
    Private,       ///< Emit "private" prefix before each symbol.
    LinkerPrivate  ///< Emit "linker private" prefix before each symbol.
  };

private:
  MCContext &Context;
  const DataLayout &TD;

  /// AnonGlobalIDs - We need to give global values the same name every time
  /// they are mangled. This keeps track of the number we give to anonymous
  /// ones.
  ///
  DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;

  /// NextAnonGlobalID - This simple counter is used to unique value names.
  ///
  unsigned NextAnonGlobalID;

public:
  Mangler(MCContext &context, const DataLayout &td)
    : Context(context), TD(td), NextAnonGlobalID(1) {}

  /// getSymbol - Return the MCSymbol for the specified global value. This
  /// symbol is the main label that is the address of the global.
  MCSymbol *getSymbol(const GlobalValue *GV);

  /// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
  /// and the specified global variable's name. If the global variable doesn't
  /// have a name, this fills in a unique name for the global.
  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
                         bool isImplicitlyPrivate);

  /// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
  /// and the specified name as the global variable name. GVName must not be
  /// empty.
  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const Twine &GVName,
                         ManglerPrefixTy PrefixTy = Mangler::Default);
};

} // End llvm namespace

#endif // LLVM_TARGET_MANGLER_H
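For orientation, a small sketch of how the class above is typically driven. It assumes an MCContext, a DataLayout, and a GlobalValue are already available from the surrounding codegen setup; it is illustrative, not part of the commit.

#include "llvm/ADT/SmallString.h"
#include "llvm/Target/Mangler.h"

// Mangle one global's name; Ctx, DL and GV are assumed to come from the caller.
void mangleOneGlobal(llvm::MCContext &Ctx, const llvm::DataLayout &DL,
                     const llvm::GlobalValue *GV) {
  llvm::Mangler Mang(Ctx, DL);

  // Fill a small buffer with the mangled name, using the default prefix.
  llvm::SmallString<64> Name;
  Mang.getNameWithPrefix(Name, GV, /*isImplicitlyPrivate=*/false);

  // The MCSymbol carries the same mangled name; emitters work with this.
  llvm::MCSymbol *Sym = Mang.getSymbol(GV);
  (void)Sym;
}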
thirdparty/clang/include/llvm/Target/TargetCallingConv.h (new vendored file, 165 lines)
@@ -0,0 +1,165 @@
//===-- llvm/Target/TargetCallingConv.h - Calling Convention ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines types for working with calling-convention information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
#define LLVM_TARGET_TARGETCALLINGCONV_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include <string>

namespace llvm {

namespace ISD {
  struct ArgFlagsTy {
  private:
    static const uint64_t NoFlagSet      = 0ULL;
    static const uint64_t ZExt           = 1ULL<<0;  ///< Zero extended
    static const uint64_t ZExtOffs       = 0;
    static const uint64_t SExt           = 1ULL<<1;  ///< Sign extended
    static const uint64_t SExtOffs       = 1;
    static const uint64_t InReg          = 1ULL<<2;  ///< Passed in register
    static const uint64_t InRegOffs      = 2;
    static const uint64_t SRet           = 1ULL<<3;  ///< Hidden struct-ret ptr
    static const uint64_t SRetOffs       = 3;
    static const uint64_t ByVal          = 1ULL<<4;  ///< Struct passed by value
    static const uint64_t ByValOffs      = 4;
    static const uint64_t Nest           = 1ULL<<5;  ///< Nested fn static chain
    static const uint64_t NestOffs       = 5;
    static const uint64_t Returned       = 1ULL<<6;  ///< Always returned
    static const uint64_t ReturnedOffs   = 6;
    static const uint64_t ByValAlign     = 0xFULL<<7;  ///< Struct alignment
    static const uint64_t ByValAlignOffs = 7;
    static const uint64_t Split          = 1ULL<<11;
    static const uint64_t SplitOffs      = 11;
    static const uint64_t OrigAlign      = 0x1FULL<<27;
    static const uint64_t OrigAlignOffs  = 27;
    static const uint64_t ByValSize      = 0xffffffffULL<<32;  ///< Struct size
    static const uint64_t ByValSizeOffs  = 32;

    static const uint64_t One            = 1ULL;  ///< 1 of this type, for shifts

    uint64_t Flags;
  public:
    ArgFlagsTy() : Flags(0) { }

    bool isZExt()      const { return Flags & ZExt; }
    void setZExt()     { Flags |= One << ZExtOffs; }

    bool isSExt()      const { return Flags & SExt; }
    void setSExt()     { Flags |= One << SExtOffs; }

    bool isInReg()     const { return Flags & InReg; }
    void setInReg()    { Flags |= One << InRegOffs; }

    bool isSRet()      const { return Flags & SRet; }
    void setSRet()     { Flags |= One << SRetOffs; }

    bool isByVal()     const { return Flags & ByVal; }
    void setByVal()    { Flags |= One << ByValOffs; }

    bool isNest()      const { return Flags & Nest; }
    void setNest()     { Flags |= One << NestOffs; }

    bool isReturned()  const { return Flags & Returned; }
    void setReturned() { Flags |= One << ReturnedOffs; }

    unsigned getByValAlign() const {
      return (unsigned)
        ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
    }
    void setByValAlign(unsigned A) {
      Flags = (Flags & ~ByValAlign) |
        (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
    }

    bool isSplit()   const { return Flags & Split; }
    void setSplit()  { Flags |= One << SplitOffs; }

    unsigned getOrigAlign() const {
      return (unsigned)
        ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
    }
    void setOrigAlign(unsigned A) {
      Flags = (Flags & ~OrigAlign) |
        (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
    }

    unsigned getByValSize() const {
      return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
    }
    void setByValSize(unsigned S) {
      Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
    }

    /// getRawBits - Represent the flags as a bunch of bits.
    uint64_t getRawBits() const { return Flags; }
  };

  /// InputArg - This struct carries flags and type information about a
  /// single incoming (formal) argument or incoming (from the perspective
  /// of the caller) return value virtual register.
  ///
  struct InputArg {
    ArgFlagsTy Flags;
    MVT VT;
    bool Used;

    /// Index original Function's argument.
    unsigned OrigArgIndex;

    /// Offset in bytes of current input value relative to the beginning of
    /// original argument. E.g. if argument was split into four 32 bit
    /// registers, we got 4 InputArgs with PartOffsets 0, 4, 8 and 12.
    unsigned PartOffset;

    InputArg() : VT(MVT::Other), Used(false) {}
    InputArg(ArgFlagsTy flags, EVT vt, bool used,
             unsigned origIdx, unsigned partOffs)
      : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
      VT = vt.getSimpleVT();
    }
  };

  /// OutputArg - This struct carries flags and a value for a
  /// single outgoing (actual) argument or outgoing (from the perspective
  /// of the caller) return value virtual register.
  ///
  struct OutputArg {
    ArgFlagsTy Flags;
    MVT VT;

    /// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
    bool IsFixed;

    /// Index original Function's argument.
    unsigned OrigArgIndex;

    /// Offset in bytes of current output value relative to the beginning of
    /// original argument. E.g. if argument was split into four 32 bit
    /// registers, we got 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
    unsigned PartOffset;

    OutputArg() : IsFixed(false) {}
    OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed,
              unsigned origIdx, unsigned partOffs)
      : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
        PartOffset(partOffs) {
      VT = vt.getSimpleVT();
    }
  };
}

} // end llvm namespace

#endif
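Worth spelling out, since it is easy to misread above: the alignment fields store Log2(Align) + 1 in a few bits rather than the alignment itself, so a cleared field decodes to 0, and an alignment of 8 is stored as 4 and decoded as (1 << 4) / 2 = 8. A small round-trip sketch using only the API shown (standalone arithmetic, not part of the commit):

#include "llvm/Target/TargetCallingConv.h"
#include <cassert>

void byValFlagsRoundTrip() {
  llvm::ISD::ArgFlagsTy Flags;
  Flags.setByVal();
  Flags.setByValAlign(8);   // stored internally as Log2_32(8) + 1 == 4
  Flags.setByValSize(32);   // byte size, kept in the upper 32 bits

  assert(Flags.isByVal());
  assert(Flags.getByValAlign() == 8);   // decoded as (1 << 4) / 2 == 8
  assert(Flags.getByValSize() == 32);
}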
thirdparty/clang/include/llvm/Target/TargetELFWriterInfo.h (new vendored file, 121 lines)
@@ -0,0 +1,121 @@
//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the TargetELFWriterInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H
#define LLVM_TARGET_TARGETELFWRITERINFO_H

namespace llvm {

  //===--------------------------------------------------------------------===//
  //                          TargetELFWriterInfo
  //===--------------------------------------------------------------------===//

  class TargetELFWriterInfo {
  protected:
    // EMachine - This field is the target specific value to emit as the
    // e_machine member of the ELF header.
    unsigned short EMachine;
    bool is64Bit, isLittleEndian;
  public:

    // Machine architectures
    enum MachineType {
      EM_NONE = 0,     // No machine
      EM_M32 = 1,      // AT&T WE 32100
      EM_SPARC = 2,    // SPARC
      EM_386 = 3,      // Intel 386
      EM_68K = 4,      // Motorola 68000
      EM_88K = 5,      // Motorola 88000
      EM_486 = 6,      // Intel 486 (deprecated)
      EM_860 = 7,      // Intel 80860
      EM_MIPS = 8,     // MIPS R3000
      EM_PPC = 20,     // PowerPC
      EM_ARM = 40,     // ARM
      EM_ALPHA = 41,   // DEC Alpha
      EM_SPARCV9 = 43, // SPARC V9
      EM_X86_64 = 62,  // AMD64
      EM_HEXAGON = 164 // Qualcomm Hexagon
    };

    // ELF File classes
    enum {
      ELFCLASS32 = 1, // 32-bit object file
      ELFCLASS64 = 2  // 64-bit object file
    };

    // ELF Endianness
    enum {
      ELFDATA2LSB = 1, // Little-endian object file
      ELFDATA2MSB = 2  // Big-endian object file
    };

    explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
    virtual ~TargetELFWriterInfo();

    unsigned short getEMachine() const { return EMachine; }
    unsigned getEFlags() const { return 0; }
    unsigned getEIClass() const { return is64Bit ? ELFCLASS64 : ELFCLASS32; }
    unsigned getEIData() const {
      return isLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
    }

    /// ELF Header and ELF Section Header Info
    unsigned getHdrSize() const { return is64Bit ? 64 : 52; }
    unsigned getSHdrSize() const { return is64Bit ? 64 : 40; }

    /// Symbol Table Info
    unsigned getSymTabEntrySize() const { return is64Bit ? 24 : 16; }

    /// getPrefELFAlignment - Returns the preferred alignment for ELF. This
    /// is used to align some sections.
    unsigned getPrefELFAlignment() const { return is64Bit ? 8 : 4; }

    /// getRelocationEntrySize - Entry size used in the relocation section
    unsigned getRelocationEntrySize() const {
      return is64Bit ? (hasRelocationAddend() ? 24 : 16)
                     : (hasRelocationAddend() ? 12 : 8);
    }

    /// getRelocationType - Returns the target specific ELF Relocation type.
    /// 'MachineRelTy' contains the object code independent relocation type
    virtual unsigned getRelocationType(unsigned MachineRelTy) const = 0;

    /// hasRelocationAddend - True if the target uses an addend in the
    /// ELF relocation entry.
    virtual bool hasRelocationAddend() const = 0;

    /// getDefaultAddendForRelTy - Gets the default addend value for a
    /// relocation entry based on the target ELF relocation type.
    virtual long int getDefaultAddendForRelTy(unsigned RelTy,
                                              long int Modifier = 0) const = 0;

    /// getRelocationTySize - Returns the size of the relocatable field in bits
    virtual unsigned getRelocationTySize(unsigned RelTy) const = 0;

    /// isPCRelativeRel - True if the relocation type is pc relative
    virtual bool isPCRelativeRel(unsigned RelTy) const = 0;

    /// getAbsoluteLabelMachineRelTy - Returns the machine relocation type used
    /// to reference a jumptable.
    virtual unsigned getAbsoluteLabelMachineRelTy() const = 0;

    /// computeRelocation - Some relocatable fields could be relocated
    /// directly, avoiding the relocation symbol emission, compute the
    /// final relocation value for this symbol.
    virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
                                       unsigned RelTy) const = 0;
  };

} // end llvm namespace

#endif // LLVM_TARGET_TARGETELFWRITERINFO_H
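Concrete targets subclass this class and fill in the pure virtual relocation queries. Below is a rough, hypothetical sketch of a 64-bit, little-endian, RELA-style writer info; every relocation number and mapping in it is a placeholder, not a real ABI value, and a real target would consult its ELF psABI instead.

#include "llvm/Target/TargetELFWriterInfo.h"

namespace {
// Hypothetical target: 64-bit, little-endian, relocations carry addends.
class MyELFWriterInfo : public llvm::TargetELFWriterInfo {
public:
  MyELFWriterInfo()
    : TargetELFWriterInfo(/*is64Bit_=*/true, /*isLittleEndian_=*/true) {}

  virtual unsigned getRelocationType(unsigned MachineRelTy) const {
    return MachineRelTy;               // placeholder 1:1 mapping
  }
  virtual bool hasRelocationAddend() const { return true; }
  virtual long int getDefaultAddendForRelTy(unsigned RelTy,
                                            long int Modifier) const {
    return Modifier;                   // placeholder: pass the modifier through
  }
  virtual unsigned getRelocationTySize(unsigned RelTy) const { return 64; }
  virtual bool isPCRelativeRel(unsigned RelTy) const { return false; }
  virtual unsigned getAbsoluteLabelMachineRelTy() const { return 0; }
  virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
                                     unsigned RelTy) const {
    // Placeholder: a simple symbol-minus-fixup distance.
    return (long int)SymOffset - (long int)RelOffset;
  }
};
} // end anonymous namespace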
thirdparty/clang/include/llvm/Target/TargetFrameLowering.h (new vendored file, 219 lines)
@@ -0,0 +1,219 @@
//===-- llvm/Target/TargetFrameLowering.h ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Interface to describe the layout of a stack frame on the target machine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETFRAMELOWERING_H
#define LLVM_TARGET_TARGETFRAMELOWERING_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include <utility>
#include <vector>

namespace llvm {
  class CalleeSavedInfo;
  class MachineFunction;
  class RegScavenger;

/// Information about stack frame layout on the target. It holds the direction
/// of stack growth, the known stack alignment on entry to each function, and
/// the offset to the locals area.
///
/// The offset to the local area is the offset from the stack pointer on
/// function entry to the first location where function data (local variables,
/// spill locations) can be stored.
class TargetFrameLowering {
public:
  enum StackDirection {
    StackGrowsUp,        // Adding to the stack increases the stack address
    StackGrowsDown       // Adding to the stack decreases the stack address
  };

  // Maps a callee saved register to a stack slot with a fixed offset.
  struct SpillSlot {
    unsigned Reg;
    int Offset; // Offset relative to stack pointer on function entry.
  };
private:
  StackDirection StackDir;
  unsigned StackAlignment;
  unsigned TransientStackAlignment;
  int LocalAreaOffset;
  bool StackRealignable;
public:
  TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
                      unsigned TransAl = 1, bool StackReal = true)
    : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
      LocalAreaOffset(LAO), StackRealignable(StackReal) {}

  virtual ~TargetFrameLowering();

  // These methods return information that describes the abstract stack layout
  // of the target machine.

  /// getStackGrowthDirection - Return the direction the stack grows
  ///
  StackDirection getStackGrowthDirection() const { return StackDir; }

  /// getStackAlignment - This method returns the number of bytes to which the
  /// stack pointer must be aligned on entry to a function. Typically, this
  /// is the largest alignment for any data object in the target.
  ///
  unsigned getStackAlignment() const { return StackAlignment; }

  /// getTransientStackAlignment - This method returns the number of bytes to
  /// which the stack pointer must be aligned at all times, even between
  /// calls.
  ///
  unsigned getTransientStackAlignment() const {
    return TransientStackAlignment;
  }

  /// isStackRealignable - This method returns whether the stack can be
  /// realigned.
  bool isStackRealignable() const {
    return StackRealignable;
  }

  /// getOffsetOfLocalArea - This method returns the offset of the local area
  /// from the stack pointer on entrance to a function.
  ///
  int getOffsetOfLocalArea() const { return LocalAreaOffset; }

  /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
  /// pairs, that contains an entry for each callee saved register that must be
  /// spilled to a particular stack location if it is spilled.
  ///
  /// Each entry in this array contains a <register,offset> pair, indicating the
  /// fixed offset from the incoming stack pointer that each register should be
  /// spilled at. If a register is not listed here, the code generator is
  /// allowed to spill it anywhere it chooses.
  ///
  virtual const SpillSlot *
  getCalleeSavedSpillSlots(unsigned &NumEntries) const {
    NumEntries = 0;
    return 0;
  }

  /// targetHandlesStackFrameRounding - Returns true if the target is
  /// responsible for rounding up the stack frame (probably at emitPrologue
  /// time).
  virtual bool targetHandlesStackFrameRounding() const {
    return false;
  }

  /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
  /// the function.
  virtual void emitPrologue(MachineFunction &MF) const = 0;
  virtual void emitEpilogue(MachineFunction &MF,
                            MachineBasicBlock &MBB) const = 0;

  /// Adjust the prologue to have the function use segmented stacks. This works
  /// by adding a check even before the "normal" function prologue.
  virtual void adjustForSegmentedStacks(MachineFunction &MF) const { }

  /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in
  /// the assembly prologue to explicitly handle the stack.
  virtual void adjustForHiPEPrologue(MachineFunction &MF) const { }

  /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
  /// saved registers and returns true if it isn't possible / profitable to do
  /// so by issuing a series of store instructions via
  /// storeRegToStackSlot(). Returns false otherwise.
  virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const std::vector<CalleeSavedInfo> &CSI,
                                         const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
  /// saved registers and returns true if it isn't possible / profitable to do
  /// so by issuing a series of load instructions via loadRegToStackSlot().
  /// Returns false otherwise.
  virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MI,
                                           const std::vector<CalleeSavedInfo> &CSI,
                                           const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// hasFP - Return true if the specified function should have a dedicated
  /// frame pointer register. For most targets this is true only if the function
  /// has variable sized allocas or if frame pointer elimination is disabled.
  virtual bool hasFP(const MachineFunction &MF) const = 0;

  /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
  /// not required, we reserve argument space for call sites in the function
  /// immediately on entry to the current function. This eliminates the need for
  /// add/sub sp brackets around call sites. Returns true if the call frame is
  /// included as part of the stack frame.
  virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
    return !hasFP(MF);
  }

  /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
  /// call frame pseudo ops before doing frame index elimination. This is
  /// possible only when frame index references between the pseudos won't
  /// need adjusting for the call frame adjustments. Normally, that's true
  /// if the function has a reserved call frame or a frame pointer. Some
  /// targets (Thumb2, for example) may have more complicated criteria,
  /// however, and can override this behavior.
  virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
    return hasReservedCallFrame(MF) || hasFP(MF);
  }

  /// getFrameIndexOffset - Returns the displacement from the frame register to
  /// the stack frame of the specified index.
  virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;

  /// getFrameIndexReference - This method should return the base register
  /// and offset used to reference a frame index location. The offset is
  /// returned directly, and the base register is returned via FrameReg.
  virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
                                     unsigned &FrameReg) const;

  /// processFunctionBeforeCalleeSavedScan - This method is called immediately
  /// before PrologEpilogInserter scans the physical registers used to determine
  /// what callee saved registers should be spilled. This method is optional.
  virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                    RegScavenger *RS = NULL) const {

  }

  /// processFunctionBeforeFrameFinalized - This method is called immediately
  /// before the specified function's frame layout (MF.getFrameInfo()) is
  /// finalized. Once the frame is finalized, MO_FrameIndex operands are
  /// replaced with direct constants. This method is optional.
  ///
  virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                                   RegScavenger *RS = NULL) const {
  }

  /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
  /// code insertion to eliminate call frame setup and destroy pseudo
  /// instructions (but only if the Target is using them). It is responsible
  /// for eliminating these instructions, replacing them with concrete
  /// instructions. This method need only be implemented if using call frame
  /// setup/destroy pseudo instructions.
  ///
  virtual void
  eliminateCallFramePseudoInstr(MachineFunction &MF,
                                MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI) const {
    llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
                     "target!");
  }
};

} // End llvm namespace

#endif
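A minimal, hypothetical subclass shows which pieces a target has to provide at this interface: the constructor fixes the stack direction, alignment and local-area offset, while emitPrologue, emitEpilogue and hasFP are the mandatory hooks. Everything below is a placeholder sketch rather than any real target's implementation.

#include "llvm/Target/TargetFrameLowering.h"

namespace {
// Hypothetical target: stack grows down, 16-byte aligned, locals start at SP.
class MyFrameLowering : public llvm::TargetFrameLowering {
public:
  MyFrameLowering()
    : llvm::TargetFrameLowering(StackGrowsDown, /*StackAl=*/16, /*LAO=*/0) {}

  virtual void emitPrologue(llvm::MachineFunction &MF) const {
    // A real target would save the frame pointer and adjust SP here.
  }
  virtual void emitEpilogue(llvm::MachineFunction &MF,
                            llvm::MachineBasicBlock &MBB) const {
    // ...and undo the prologue adjustments here.
  }
  virtual bool hasFP(const llvm::MachineFunction &MF) const {
    return false;  // placeholder: never reserve a dedicated frame pointer
  }
};
} // end anonymous namespace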
thirdparty/clang/include/llvm/Target/TargetInstrInfo.h (new vendored file, 984 lines)
@@ -0,0 +1,984 @@
//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H

#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/MC/MCInstrInfo.h"

namespace llvm {

class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MachineRegisterInfo;
class MDNode;
class MCInst;
class MCSchedModel;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
class BranchProbability;

template<class T> class SmallVectorImpl;


//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
  TargetInstrInfo(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
  void operator=(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
public:
  TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1)
    : CallFrameSetupOpcode(CFSetupOpcode),
      CallFrameDestroyOpcode(CFDestroyOpcode) {
  }

  virtual ~TargetInstrInfo();

  /// getRegClass - Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
                                         unsigned OpNum,
                                         const TargetRegisterInfo *TRI,
                                         const MachineFunction &MF) const;

  /// isTriviallyReMaterializable - Return true if the instruction is trivially
  /// rematerializable, meaning it has no side effects and requires no operands
  /// that aren't always available.
  bool isTriviallyReMaterializable(const MachineInstr *MI,
                                   AliasAnalysis *AA = 0) const {
    return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
           (MI->getDesc().isRematerializable() &&
            (isReallyTriviallyReMaterializable(MI, AA) ||
             isReallyTriviallyReMaterializableGeneric(MI, AA)));
  }

protected:
  /// isReallyTriviallyReMaterializable - For instructions with opcodes for
  /// which the M_REMATERIALIZABLE flag is set, this hook lets the target
  /// specify whether the instruction is actually trivially rematerializable,
  /// taking into consideration its operands. This predicate must return false
  /// if the instruction has any side effects other than producing a value, or
  /// if it requires any address registers that are not always available.
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                 AliasAnalysis *AA) const {
    return false;
  }

private:
  /// isReallyTriviallyReMaterializableGeneric - For instructions with opcodes
  /// for which the M_REMATERIALIZABLE flag is set and the target hook
  /// isReallyTriviallyReMaterializable returns false, this function does
  /// target-independent tests to determine if the instruction is really
  /// trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                                AliasAnalysis *AA) const;

public:
  /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
  /// frame setup/destroy instructions if they exist (-1 otherwise). Some
  /// targets use pseudo instructions in order to abstract away the difference
  /// between operating with a frame pointer and operating without, through the
  /// use of these two instructions.
  ///
  int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
                                     unsigned &SrcReg, unsigned &DstReg,
                                     unsigned &SubIdx) const {
    return false;
  }

  /// isLoadFromStackSlot - If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// hasLoadFromStackSlot - If the specified machine instruction has
  /// a load from a stack slot, return true along with the FrameIndex
  /// of the loaded stack slot and the machine mem operand containing
  /// the reference. If not, return false. Unlike
  /// isLoadFromStackSlot, this returns true for any instructions that
  /// load from the stack. This is just a hint, as some cases may be
  /// missed.
  virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
                                    const MachineMemOperand *&MMO,
                                    int &FrameIndex) const;

  /// isStoreToStackSlot - If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// hasStoreToStackSlot - If the specified machine instruction has a
  /// store to a stack slot, return true along with the FrameIndex of
  /// the loaded stack slot and the machine mem operand containing the
  /// reference. If not, return false. Unlike isStoreToStackSlot,
  /// this returns true for any instructions that store to the
  /// stack. This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(const MachineInstr *MI,
                                   const MachineMemOperand *&MMO,
                                   int &FrameIndex) const;

  /// reMaterialize - Re-issue the specified 'original' instruction at the
  /// specific location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubIdx,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const;

  /// duplicate - Create a duplicate of the Orig instruction in MF. This is like
  /// MachineFunction::CloneMachineInstr(), but the target may update operands
  /// that are required to be unique.
  ///
  /// The instruction must be duplicable as indicated by isNotDuplicable().
  virtual MachineInstr *duplicate(MachineInstr *Orig,
                                  MachineFunction &MF) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  virtual MachineInstr *
  convertToThreeAddress(MachineFunction::iterator &MFI,
                        MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
    return 0;
  }

  /// commuteInstruction - If a target has any instructions that are
  /// commutable but require converting to different instructions or making
  /// non-trivial changes to commute them, this method can be overloaded to do
  /// that. The default implementation simply swaps the commutable operands.
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned. Do not call this
  /// method for a non-commutable instruction, but there may be some cases
  /// where this method fails and returns null.
  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                           bool NewMI = false) const;

  /// findCommutedOpIndices - If specified MI is commutable, return the two
  /// operand indices that would swap value. Return false if the instruction
  /// is not in a form which this routine understands.
  virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;

  /// produceSameValue - Return true if two machine instructions would produce
  /// identical values. By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI = 0) const;

  /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target). Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its succ)
  ///    just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and a
  ///    list of operands that evaluate the condition. These operands can be
  ///    passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition. These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that RemoveBranch and InsertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }
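As an editorial aside on how the AnalyzeBranch contract above is consumed, here is a rough sketch of the four documented outcomes. TII and MBB are assumed to come from whatever pass calls it; this is illustrative and not taken from the commit.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Target/TargetInstrInfo.h"

// Classify how a block ends, following the AnalyzeBranch contract.
const char *classifyBlockEnd(const llvm::TargetInstrInfo *TII,
                             llvm::MachineBasicBlock &MBB) {
  llvm::MachineBasicBlock *TBB = 0, *FBB = 0;
  llvm::SmallVector<llvm::MachineOperand, 4> Cond;

  if (TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return "not analyzable";                          // e.g. indirect branch
  if (!TBB)
    return "falls through";                           // case 1
  if (Cond.empty())
    return "unconditional branch to TBB";             // case 2
  if (!FBB)
    return "conditional branch to TBB, falls through";// case 3
  return "conditional branch to TBB, otherwise FBB";  // case 4
}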
  /// RemoveBranch - Remove the branching code at the end of the specific MBB.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!");
  }

  /// InsertBranch - Insert branch code into the end of the specified
  /// MachineBasicBlock. The operands to this method are the same as those
  /// returned by AnalyzeBranch. This is only invoked in cases where
  /// AnalyzeBranch returns success. It returns the number of instructions
  /// inserted.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where AnalyzeBranch doesn't apply because there was no original
  /// branch to analyze. At least this much must be implemented, else tail
  /// merging needs to be disabled.
  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                DebugLoc DL) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
  }

  /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
  /// after it, replacing it with an unconditional branch to NewDest. This is
  /// used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const;

  /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. instruction would be the start
  /// of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// isProfitableToIfCvt - Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  virtual
  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                           unsigned ExtraPredCycles,
                           const BranchProbability &Probability) const {
    return false;
  }

  /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
  /// checks for the case where two basic blocks from true and false path
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool
  isProfitableToIfCvt(MachineBasicBlock &TMBB,
                      unsigned NumTCycles, unsigned ExtraTCycles,
                      MachineBasicBlock &FMBB,
                      unsigned NumFCycles, unsigned ExtraFCycles,
                      const BranchProbability &Probability) const {
    return false;
  }

  /// isProfitableToDupForIfCvt - Return true if it's profitable for
  /// if-converter to duplicate instructions of specified accumulated
  /// instruction latencies in the specified MBB to enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool
  isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                            const BranchProbability &Probability) const {
    return false;
  }

  /// isProfitableToUnpredicate - Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
  /// exclusive predicates.
  /// e.g.
  ///   subeq  r0, r1, #1
  ///   addne  r0, r1, #1
  /// =>
  ///   sub    r0, r1, #1
  ///   addne  r0, r1, #1
  ///
  /// This may be profitable if conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }

  /// canInsertSelect - Return true if it is possible to insert a select
  /// instruction that chooses between TrueReg and FalseReg based on the
  /// condition code in Cond.
  ///
  /// When successful, also return the latency in cycles from TrueReg,
  /// FalseReg, and Cond to the destination register. In most cases, a select
  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
  ///
  /// Some x86 implementations have 2-cycle cmov instructions.
  ///
  /// @param MBB         Block where select instruction would be inserted.
  /// @param Cond        Condition returned by AnalyzeBranch.
  /// @param TrueReg     Virtual register to select when Cond is true.
  /// @param FalseReg    Virtual register to select when Cond is false.
  /// @param CondCycles  Latency from Cond+Branch to select output.
  /// @param TrueCycles  Latency from TrueReg to select output.
  /// @param FalseCycles Latency from FalseReg to select output.
  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               unsigned TrueReg, unsigned FalseReg,
                               int &CondCycles,
                               int &TrueCycles, int &FalseCycles) const {
    return false;
  }

  /// insertSelect - Insert a select instruction into MBB before I that will
  /// copy TrueReg to DstReg when Cond is true, and FalseReg to DstReg when
  /// Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by select instruction.
  /// @param Cond     Condition as computed by AnalyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, DebugLoc DL,
                            unsigned DstReg,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            unsigned TrueReg, unsigned FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }

  /// analyzeSelect - Analyze the given select instruction, returning true if
  /// it cannot be understood. It is assumed that MI->isSelect() is true.
  ///
  /// When successful, return the controlling condition and the operands that
  /// determine the true and false result values.
  ///
  ///   Result = SELECT Cond, TrueOp, FalseOp
  ///
  /// Some targets can optimize select instructions, for example by predicating
  /// the instruction defining one of the operands. Such targets should set
  /// Optimizable.
  ///
  /// @param MI          Select instruction to analyze.
  /// @param Cond        Condition controlling the select.
  /// @param TrueOp      Operand number of the value selected when Cond is true.
  /// @param FalseOp     Operand number of the value selected when Cond is false.
  /// @param Optimizable Returned as true if MI is optimizable.
  /// @returns False on success.
  virtual bool analyzeSelect(const MachineInstr *MI,
                             SmallVectorImpl<MachineOperand> &Cond,
                             unsigned &TrueOp, unsigned &FalseOp,
                             bool &Optimizable) const {
    assert(MI && MI->getDesc().isSelect() && "MI must be a select instruction");
    return true;
  }

  /// optimizeSelect - Given a select instruction that was understood by
  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
  /// merging it with one of its operands. Returns NULL on failure.
  ///
  /// When successful, returns the new select instruction. The client is
  /// responsible for deleting MI.
  ///
  /// If both sides of the select can be optimized, PreferFalse is used to pick
  /// a side.
  ///
  /// @param MI          Optimizable select instruction.
  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
  /// @returns Optimized instruction or NULL.
  virtual MachineInstr *optimizeSelect(MachineInstr *MI,
                                       bool PreferFalse = false) const {
    // This function must be implemented if Optimizable is ever set.
    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
  }
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
|
||||
///
|
||||
/// This function should support copies within any legal register class as
|
||||
/// well as any cross-class copies created during instruction selection.
|
||||
///
|
||||
/// The source and destination registers may overlap, which may require a
|
||||
/// careful implementation when multiple copy instructions are required for
|
||||
/// large registers. See for example the ARM target.
|
||||
virtual void copyPhysReg(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI, DebugLoc DL,
|
||||
unsigned DestReg, unsigned SrcReg,
|
||||
bool KillSrc) const {
|
||||
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
|
||||
}
|
||||
|
||||
/// storeRegToStackSlot - Store the specified register of the given register
|
||||
/// class to the specified stack frame index. The store instruction is to be
|
||||
/// added to the given machine basic block before the specified machine
|
||||
/// instruction. If isKill is true, the register operand is the last use and
|
||||
/// must be marked kill.
|
||||
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
unsigned SrcReg, bool isKill, int FrameIndex,
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const {
|
||||
llvm_unreachable("Target didn't implement "
|
||||
"TargetInstrInfo::storeRegToStackSlot!");
|
||||
}
|
||||
|
||||
/// loadRegFromStackSlot - Load the specified register of the given register
|
||||
/// class from the specified stack frame index. The load instruction is to be
|
||||
/// added to the given machine basic block before the specified machine
|
||||
/// instruction.
|
||||
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
unsigned DestReg, int FrameIndex,
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const {
|
||||
llvm_unreachable("Target didn't implement "
|
||||
"TargetInstrInfo::loadRegFromStackSlot!");
|
||||
}
|
||||
|
||||
/// expandPostRAPseudo - This function is called for all pseudo instructions
|
||||
/// that remain after register allocation. Many pseudo instructions are
|
||||
/// created to help register allocation. This is the place to convert them
|
||||
/// into real instructions. The target can edit MI in place, or it can insert
|
||||
/// new instructions and erase MI. The function should return true if
|
||||
/// anything was changed.
|
||||
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// emitFrameIndexDebugValue - Emit a target-dependent form of
|
||||
/// DBG_VALUE encoding the address of a frame index. Addresses would
|
||||
/// normally be lowered the same way as other addresses on the target,
|
||||
/// e.g. in load instructions. For targets that do not support this
|
||||
/// the debug info is simply lost.
|
||||
/// If you add this for a target you should handle this DBG_VALUE in the
|
||||
/// target-specific AsmPrinter code as well; you will probably get invalid
|
||||
/// assembly output if you don't.
|
||||
virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
|
||||
int FrameIx,
|
||||
uint64_t Offset,
|
||||
const MDNode *MDPtr,
|
||||
DebugLoc dl) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
|
||||
/// slot into the specified machine instruction for the specified operand(s).
|
||||
/// If this is possible, a new instruction is returned with the specified
|
||||
/// operand folded, otherwise NULL is returned.
|
||||
/// The new instruction is inserted before MI, and the client is responsible
|
||||
/// for removing the old instruction.
|
||||
MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
/// foldMemoryOperand - Same as the previous version except it allows folding
|
||||
/// of any load and store from / to any address, not just from a specific
|
||||
/// stack slot.
|
||||
MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const;
|
||||
|
||||
protected:
|
||||
/// foldMemoryOperandImpl - Target-dependent implementation for
|
||||
/// foldMemoryOperand. Target-independent code in foldMemoryOperand will
|
||||
/// take care of adding a MachineMemOperand to the newly created instruction.
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Target-dependent implementation for
|
||||
/// foldMemoryOperand. Target-independent code in foldMemoryOperand will
|
||||
/// take care of adding a MachineMemOperand to the newly created instruction.
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
public:
|
||||
/// canFoldMemoryOperand - Returns true for the specified load / store if
|
||||
/// folding is possible.
|
||||
virtual
|
||||
bool canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const;
|
||||
|
||||
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
|
||||
/// a store or a load and a store into two or more instruction. If this is
|
||||
/// possible, returns true as well as the new instructions by reference.
|
||||
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
|
||||
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
|
||||
SmallVectorImpl<MachineInstr*> &NewMIs) const{
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
|
||||
SmallVectorImpl<SDNode*> &NewNodes) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
|
||||
/// instruction after load / store are unfolded from an instruction of the
|
||||
/// specified opcode. It returns zero if the specified unfolding is not
|
||||
/// possible. If LoadRegIndex is non-null, it is filled in with the operand
|
||||
/// index of the operand which will hold the register holding the loaded
|
||||
/// value.
|
||||
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
|
||||
bool UnfoldLoad, bool UnfoldStore,
|
||||
unsigned *LoadRegIndex = 0) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
|
||||
/// to determine if two loads are loading from the same base address. It
|
||||
/// should only return true if the base pointers are the same and the
|
||||
/// only differences between the two addresses are the offset. It also returns
|
||||
/// the offsets by reference.
|
||||
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
|
||||
int64_t &Offset1, int64_t &Offset2) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
|
||||
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
|
||||
/// be scheduled together. On some targets if two loads are loading from
|
||||
/// addresses in the same cache line, it's better if they are scheduled
|
||||
/// together. This function takes two integers that represent the load offsets
|
||||
/// from the common base address. It returns true if it decides it's desirable
|
||||
/// to schedule the two loads together. "NumLoads" is the number of loads that
|
||||
/// have already been scheduled after Load1.
|
||||
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
|
||||
int64_t Offset1, int64_t Offset2,
|
||||
unsigned NumLoads) const {
|
||||
return false;
|
||||
}
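// A minimal sketch of a target override, assuming 64-byte cache lines and a
// hypothetical limit of four clustered loads (both numbers are placeholders):
//
//   bool MyTargetInstrInfo::shouldScheduleLoadsNear(
//       SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2,
//       unsigned NumLoads) const {
//     // Keep clustering only while the loads stay within one cache line.
//     return NumLoads < 4 && Offset2 - Offset1 < 64;
//   }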
|
||||
|
||||
/// \brief Get the base register and byte offset of a load/store instr.
|
||||
virtual bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
|
||||
unsigned &BaseReg, unsigned &Offset,
|
||||
const TargetRegisterInfo *TRI) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual bool shouldClusterLoads(MachineInstr *FirstLdSt,
|
||||
MachineInstr *SecondLdSt,
|
||||
unsigned NumLoads) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// \brief Can this target fuse the given instructions if they are scheduled
|
||||
/// adjacent.
|
||||
virtual bool shouldScheduleAdjacent(MachineInstr* First,
|
||||
MachineInstr *Second) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// ReverseBranchCondition - Reverses the branch condition of the specified
|
||||
/// condition list, returning false on success and true if it cannot be
|
||||
/// reversed.
|
||||
virtual
|
||||
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
|
||||
return true;
|
||||
}
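// A minimal sketch for a target whose branch condition is a single
// condition-code immediate; "MyCC" is a hypothetical helper namespace.
// Note the inverted convention: returning false means success.
//
//   bool MyTargetInstrInfo::ReverseBranchCondition(
//       SmallVectorImpl<MachineOperand> &Cond) const {
//     assert(Cond.size() == 1 && "Invalid branch condition!");
//     MyCC::CondCode CC = (MyCC::CondCode)Cond[0].getImm();
//     Cond[0].setImm(MyCC::getOppositeCondition(CC));
//     return false; // Reversed successfully.
//   }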
|
||||
|
||||
/// insertNoop - Insert a noop into the instruction stream at the specified
|
||||
/// point.
|
||||
virtual void insertNoop(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI) const;
|
||||
|
||||
|
||||
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
|
||||
virtual void getNoopForMachoTarget(MCInst &NopInst) const {
|
||||
// Default to just using 'nop' string.
|
||||
}
|
||||
|
||||
|
||||
/// isPredicated - Returns true if the instruction is already predicated.
|
||||
///
|
||||
virtual bool isPredicated(const MachineInstr *MI) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// isUnpredicatedTerminator - Returns true if the instruction is a
|
||||
/// terminator instruction that has not been predicated.
|
||||
virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
|
||||
|
||||
/// PredicateInstruction - Convert the instruction into a predicated
|
||||
/// instruction. It returns true if the operation was successful.
|
||||
virtual
|
||||
bool PredicateInstruction(MachineInstr *MI,
|
||||
const SmallVectorImpl<MachineOperand> &Pred) const;
|
||||
|
||||
/// SubsumesPredicate - Returns true if the first specified predicate
|
||||
/// subsumes the second, e.g. GE subsumes GT.
|
||||
virtual
|
||||
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
|
||||
const SmallVectorImpl<MachineOperand> &Pred2) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// DefinesPredicate - If the specified instruction defines any predicate
|
||||
/// or condition code register(s) used for predication, returns true as well
|
||||
/// as the definition predicate(s) by reference.
|
||||
virtual bool DefinesPredicate(MachineInstr *MI,
|
||||
std::vector<MachineOperand> &Pred) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// isPredicable - Return true if the specified instruction can be predicated.
|
||||
/// By default, this returns true for every instruction with a
|
||||
/// PredicateOperand.
|
||||
virtual bool isPredicable(MachineInstr *MI) const {
|
||||
return MI->getDesc().isPredicable();
|
||||
}
|
||||
|
||||
/// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
|
||||
/// instruction that defines the specified register class.
|
||||
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// isSchedulingBoundary - Test if the given instruction should be
|
||||
/// considered a scheduling boundary. This primarily includes labels and
|
||||
/// terminators.
|
||||
virtual bool isSchedulingBoundary(const MachineInstr *MI,
|
||||
const MachineBasicBlock *MBB,
|
||||
const MachineFunction &MF) const;
|
||||
|
||||
/// Measure the specified inline asm to determine an approximation of its
|
||||
/// length.
|
||||
virtual unsigned getInlineAsmLength(const char *Str,
|
||||
const MCAsmInfo &MAI) const;
|
||||
|
||||
/// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer to
|
||||
/// use for this target when scheduling the machine instructions before
|
||||
/// register allocation.
|
||||
virtual ScheduleHazardRecognizer*
|
||||
CreateTargetHazardRecognizer(const TargetMachine *TM,
|
||||
const ScheduleDAG *DAG) const;
|
||||
|
||||
/// CreateTargetMIHazardRecognizer - Allocate and return a hazard recognizer
|
||||
/// to use for this target when scheduling the machine instructions before
|
||||
/// register allocation.
|
||||
virtual ScheduleHazardRecognizer*
|
||||
CreateTargetMIHazardRecognizer(const InstrItineraryData*,
|
||||
const ScheduleDAG *DAG) const;
|
||||
|
||||
/// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
|
||||
/// recognizer to use for this target when scheduling the machine instructions
|
||||
/// after register allocation.
|
||||
virtual ScheduleHazardRecognizer*
|
||||
CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
|
||||
const ScheduleDAG *DAG) const;
|
||||
|
||||
/// Provide a global flag for disabling the PreRA hazard recognizer that
|
||||
/// targets may choose to honor.
|
||||
bool usePreRAHazardRecognizer() const;
|
||||
|
||||
/// analyzeCompare - For a comparison instruction, return the source registers
|
||||
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
|
||||
/// compares against in Mask and Value. Return true if the comparison instruction
|
||||
/// can be analyzed.
|
||||
virtual bool analyzeCompare(const MachineInstr *MI,
|
||||
unsigned &SrcReg, unsigned &SrcReg2,
|
||||
int &Mask, int &Value) const {
|
||||
return false;
|
||||
}
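// A minimal sketch of a target override; the compare opcodes are hypothetical.
// A reg-reg compare reports both source registers, a reg-imm compare puts the
// immediate in Value.
//
//   bool MyTargetInstrInfo::analyzeCompare(const MachineInstr *MI,
//       unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const {
//     switch (MI->getOpcode()) {
//     default: return false;
//     case MyTarget::CMPrr:
//       SrcReg  = MI->getOperand(0).getReg();
//       SrcReg2 = MI->getOperand(1).getReg();
//       Mask = Value = 0;
//       return true;
//     case MyTarget::CMPri:
//       SrcReg  = MI->getOperand(0).getReg();
//       SrcReg2 = 0;
//       Mask  = ~0;
//       Value = MI->getOperand(1).getImm();
//       return true;
//     }
//   }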
|
||||
|
||||
/// optimizeCompareInstr - See if the comparison instruction can be converted
|
||||
/// into something more efficient. E.g., on ARM most instructions can set the
|
||||
/// flags register, obviating the need for a separate CMP.
|
||||
virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
|
||||
unsigned SrcReg, unsigned SrcReg2,
|
||||
int Mask, int Value,
|
||||
const MachineRegisterInfo *MRI) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// optimizeLoadInstr - Try to remove the load by folding it to a register
|
||||
/// operand at the use. We fold the load instructions if and only if the
|
||||
/// def and use are in the same BB. We only look at one load and see
|
||||
/// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
|
||||
/// defined by the load we are trying to fold. DefMI returns the machine
|
||||
/// instruction that defines FoldAsLoadDefReg, and the function returns
|
||||
/// the machine instruction generated due to folding.
|
||||
virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
|
||||
const MachineRegisterInfo *MRI,
|
||||
unsigned &FoldAsLoadDefReg,
|
||||
MachineInstr *&DefMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
|
||||
/// instruction, try to fold the immediate into the use instruction.
|
||||
/// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
|
||||
/// then the caller may assume that DefMI has been erased from its parent
|
||||
/// block. The caller may assume that it will not be erased by this
|
||||
/// function otherwise.
|
||||
virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
|
||||
unsigned Reg, MachineRegisterInfo *MRI) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// getNumMicroOps - Return the number of u-operations the given machine
|
||||
/// instruction will be decoded to on the target CPU. The itinerary's
|
||||
/// IssueWidth is the number of microops that can be dispatched each
|
||||
/// cycle. An instruction with zero microops takes no dispatch resources.
|
||||
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
|
||||
const MachineInstr *MI) const;
|
||||
|
||||
/// isZeroCost - Return true for pseudo instructions that don't consume any
|
||||
/// machine resources in their current form. These are common cases that the
|
||||
/// scheduler should consider free, rather than conservatively handling them
|
||||
/// as instructions with no itinerary.
|
||||
bool isZeroCost(unsigned Opcode) const {
|
||||
return Opcode <= TargetOpcode::COPY;
|
||||
}
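// Usage sketch: a scheduler can skip resource bookkeeping for such pseudos.
//
//   if (TII->isZeroCost(MI->getOpcode()))
//     continue; // e.g. PHI, COPY, IMPLICIT_DEF consume no issue slots.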
|
||||
|
||||
virtual int getOperandLatency(const InstrItineraryData *ItinData,
|
||||
SDNode *DefNode, unsigned DefIdx,
|
||||
SDNode *UseNode, unsigned UseIdx) const;
|
||||
|
||||
/// getOperandLatency - Compute and return the use operand latency of a given
|
||||
/// pair of def and use.
|
||||
/// In most cases, the static scheduling itinerary is enough to determine the
|
||||
/// operand latency. But it may not be possible for instructions with a variable
|
||||
/// number of defs / uses.
|
||||
///
|
||||
/// This is a raw interface to the itinerary that may be directly overridden by
|
||||
/// a target. Use computeOperandLatency to get the best estimate of latency.
|
||||
virtual int getOperandLatency(const InstrItineraryData *ItinData,
|
||||
const MachineInstr *DefMI, unsigned DefIdx,
|
||||
const MachineInstr *UseMI,
|
||||
unsigned UseIdx) const;
|
||||
|
||||
/// computeOperandLatency - Compute and return the latency of the given data
|
||||
/// dependent def and use when the operand indices are already known.
|
||||
///
|
||||
/// FindMin may be set to get the minimum vs. expected latency.
|
||||
unsigned computeOperandLatency(const InstrItineraryData *ItinData,
|
||||
const MachineInstr *DefMI, unsigned DefIdx,
|
||||
const MachineInstr *UseMI, unsigned UseIdx,
|
||||
bool FindMin = false) const;
|
||||
|
||||
/// getInstrLatency - Compute the instruction latency of a given instruction.
|
||||
/// If the instruction has higher cost when predicated, it's returned via
|
||||
/// PredCost.
|
||||
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
|
||||
const MachineInstr *MI,
|
||||
unsigned *PredCost = 0) const;
|
||||
|
||||
virtual int getInstrLatency(const InstrItineraryData *ItinData,
|
||||
SDNode *Node) const;
|
||||
|
||||
/// Return the default expected latency for a def based on its opcode.
|
||||
unsigned defaultDefLatency(const MCSchedModel *SchedModel,
|
||||
const MachineInstr *DefMI) const;
|
||||
|
||||
int computeDefOperandLatency(const InstrItineraryData *ItinData,
|
||||
const MachineInstr *DefMI, bool FindMin) const;
|
||||
|
||||
/// isHighLatencyDef - Return true if this opcode has high latency to its
|
||||
/// result.
|
||||
virtual bool isHighLatencyDef(int opc) const { return false; }
|
||||
|
||||
/// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
|
||||
/// and a use in the current loop, return true if the target considers
|
||||
/// it 'high'. This is used by optimization passes such as machine LICM to
|
||||
/// determine whether it makes sense to hoist an instruction out even in
|
||||
/// a high register pressure situation.
|
||||
virtual
|
||||
bool hasHighOperandLatency(const InstrItineraryData *ItinData,
|
||||
const MachineRegisterInfo *MRI,
|
||||
const MachineInstr *DefMI, unsigned DefIdx,
|
||||
const MachineInstr *UseMI, unsigned UseIdx) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// hasLowDefLatency - Compute operand latency of a def of 'Reg', return true
|
||||
/// if the target considers it 'low'.
|
||||
virtual
|
||||
bool hasLowDefLatency(const InstrItineraryData *ItinData,
|
||||
const MachineInstr *DefMI, unsigned DefIdx) const;
|
||||
|
||||
/// verifyInstruction - Perform target specific instruction verification.
|
||||
virtual
|
||||
bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// getExecutionDomain - Return the current execution domain and bit mask of
|
||||
/// possible domains for instruction.
|
||||
///
|
||||
/// Some micro-architectures have multiple execution domains, and multiple
|
||||
/// opcodes that perform the same operation in different domains. For
|
||||
/// example, the x86 architecture provides the por, orps, and orpd
|
||||
/// instructions that all do the same thing. There is a latency penalty if a
|
||||
/// register is written in one domain and read in another.
|
||||
///
|
||||
/// This function returns a pair (domain, mask) containing the execution
|
||||
/// domain of MI, and a bit mask of possible domains. The setExecutionDomain
|
||||
/// function can be used to change the opcode to one of the domains in the
|
||||
/// bit mask. Instructions whose execution domain can't be changed should
|
||||
/// return a 0 mask.
|
||||
///
|
||||
/// The execution domain numbers don't have any special meaning except domain
|
||||
/// 0 is used for instructions that are not associated with any interesting
|
||||
/// execution domain.
|
||||
///
|
||||
virtual std::pair<uint16_t, uint16_t>
|
||||
getExecutionDomain(const MachineInstr *MI) const {
|
||||
return std::make_pair(0, 0);
|
||||
}
|
||||
|
||||
/// setExecutionDomain - Change the opcode of MI to execute in Domain.
|
||||
///
|
||||
/// The bit (1 << Domain) must be set in the mask returned from
|
||||
/// getExecutionDomain(MI).
|
||||
///
|
||||
virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
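// Usage sketch of the two hooks together (simplified; "WantedDomain" is an
// assumed variable holding the domain the surrounding code prefers):
//
//   std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(MI);
//   // DomP.first = current domain, DomP.second = bit mask of legal domains.
//   if (DomP.second & (1u << WantedDomain))
//     TII->setExecutionDomain(MI, WantedDomain); // re-encode the opcode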
|
||||
|
||||
|
||||
/// getPartialRegUpdateClearance - Returns the preferred minimum clearance
|
||||
/// before an instruction with an unwanted partial register update.
|
||||
///
|
||||
/// Some instructions only write part of a register, and implicitly need to
|
||||
/// read the other parts of the register. This may cause unwanted stalls
|
||||
/// preventing otherwise unrelated instructions from executing in parallel in
|
||||
/// an out-of-order CPU.
|
||||
///
|
||||
/// For example, the x86 instruction cvtsi2ss writes its result to bits
|
||||
/// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
|
||||
/// the instruction needs to wait for the old value of the register to become
|
||||
/// available:
|
||||
///
|
||||
/// addps %xmm1, %xmm0
|
||||
/// movaps %xmm0, (%rax)
|
||||
/// cvtsi2ss %rbx, %xmm0
|
||||
///
|
||||
/// In the code above, the cvtsi2ss instruction needs to wait for the addps
|
||||
/// instruction before it can issue, even though the high bits of %xmm0
|
||||
/// probably aren't needed.
|
||||
///
|
||||
/// This hook returns the preferred clearance before MI, measured in
|
||||
/// instructions. Other defs of MI's operand OpNum are avoided in the last N
|
||||
/// instructions before MI. It should only return a positive value for
|
||||
/// unwanted dependencies. If the old bits of the defined register have
|
||||
/// useful values, or if MI is determined to otherwise read the dependency,
|
||||
/// the hook should return 0.
|
||||
///
|
||||
/// The unwanted dependency may be handled by:
|
||||
///
|
||||
/// 1. Allocating the same register for an MI def and use. That makes the
|
||||
/// unwanted dependency identical to a required dependency.
|
||||
///
|
||||
/// 2. Allocating a register for the def that has no defs in the previous N
|
||||
/// instructions.
|
||||
///
|
||||
/// 3. Calling breakPartialRegDependency() with the same arguments. This
|
||||
/// allows the target to insert a dependency breaking instruction.
|
||||
///
|
||||
virtual unsigned
|
||||
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
|
||||
const TargetRegisterInfo *TRI) const {
|
||||
// The default implementation returns 0 for no partial register dependency.
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// breakPartialRegDependency - Insert a dependency-breaking instruction
|
||||
/// before MI to eliminate an unwanted dependency on OpNum.
|
||||
///
|
||||
/// If it wasn't possible to avoid a def in the last N instructions before MI
|
||||
/// (see getPartialRegUpdateClearance), this hook will be called to break the
|
||||
/// unwanted dependency.
|
||||
///
|
||||
/// On x86, an xorps instruction can be used as a dependency breaker:
|
||||
///
|
||||
/// addps %xmm1, %xmm0
|
||||
/// movaps %xmm0, (%rax)
|
||||
/// xorps %xmm0, %xmm0
|
||||
/// cvtsi2ss %rbx, %xmm0
|
||||
///
|
||||
/// An <imp-kill> operand should be added to MI if an instruction was
|
||||
/// inserted. This ties the instructions together in the post-ra scheduler.
|
||||
///
|
||||
virtual void
|
||||
breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
|
||||
const TargetRegisterInfo *TRI) const {}
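// A minimal sketch of a target override that zeroes the register with a
// dependency-breaking idiom; "MyTarget::XORrr" is a hypothetical opcode.
//
//   void MyTargetInstrInfo::breakPartialRegDependency(
//       MachineBasicBlock::iterator MI, unsigned OpNum,
//       const TargetRegisterInfo *TRI) const {
//     unsigned Reg = MI->getOperand(OpNum).getReg();
//     BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
//             get(MyTarget::XORrr), Reg)
//         .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
//     MI->addRegisterKilled(Reg, TRI, true); // tie with an <imp-kill>
//   }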
|
||||
|
||||
/// Create machine specific model for scheduling.
|
||||
virtual DFAPacketizer*
|
||||
CreateTargetScheduleState(const TargetMachine*, const ScheduleDAG*) const {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
private:
|
||||
int CallFrameSetupOpcode, CallFrameDestroyOpcode;
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
65
thirdparty/clang/include/llvm/Target/TargetIntrinsicInfo.h
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
//===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file describes the target intrinsic instructions to the code generator.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
|
||||
#define LLVM_TARGET_TARGETINTRINSICINFO_H
|
||||
|
||||
#include "llvm/Support/Compiler.h"
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class Function;
|
||||
class Module;
|
||||
class Type;
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
///
|
||||
/// TargetIntrinsicInfo - Interface to description of machine instruction set
|
||||
///
|
||||
class TargetIntrinsicInfo {
|
||||
TargetIntrinsicInfo(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
|
||||
void operator=(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
|
||||
public:
|
||||
TargetIntrinsicInfo();
|
||||
virtual ~TargetIntrinsicInfo();
|
||||
|
||||
/// Return the name of a target intrinsic, e.g. "llvm.bfin.ssync".
|
||||
/// The Tys and numTys parameters are for intrinsics with overloaded types
|
||||
/// (e.g., those using iAny or fAny). For a declaration for an overloaded
|
||||
/// intrinsic, Tys should point to an array of numTys pointers to Type,
|
||||
/// and must provide exactly one type for each overloaded type in the
|
||||
/// intrinsic.
|
||||
virtual std::string getName(unsigned IID, Type **Tys = 0,
|
||||
unsigned numTys = 0) const = 0;
|
||||
|
||||
/// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
|
||||
/// names.
|
||||
virtual unsigned lookupName(const char *Name, unsigned Len) const = 0;
|
||||
|
||||
/// Return the target intrinsic ID of a function, or 0.
|
||||
virtual unsigned getIntrinsicID(Function *F) const;
|
||||
|
||||
/// Returns true if the intrinsic can be overloaded.
|
||||
virtual bool isOverloaded(unsigned IID) const = 0;
|
||||
|
||||
/// Create or insert an LLVM Function declaration for an intrinsic,
|
||||
/// and return it. The Tys and numTys are for intrinsics with overloaded
|
||||
/// types. See above for more information.
|
||||
virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
|
||||
unsigned numTys = 0) const = 0;
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
137
thirdparty/clang/include/llvm/Target/TargetJITInfo.h
vendored
Normal file
@@ -0,0 +1,137 @@
|
||||
//===- Target/TargetJITInfo.h - Target Information for JIT ------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file exposes an abstract interface used by the Just-In-Time code
|
||||
// generator to perform target-specific activities, such as emitting stubs. If
|
||||
// a TargetMachine supports JIT code generation, it should provide one of these
|
||||
// objects through the getJITInfo() method.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETJITINFO_H
|
||||
#define LLVM_TARGET_TARGETJITINFO_H
|
||||
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
#include <cassert>
|
||||
|
||||
namespace llvm {
|
||||
class Function;
|
||||
class GlobalValue;
|
||||
class JITCodeEmitter;
|
||||
class MachineRelocation;
|
||||
|
||||
/// TargetJITInfo - Target specific information required by the Just-In-Time
|
||||
/// code generator.
|
||||
class TargetJITInfo {
|
||||
virtual void anchor();
|
||||
public:
|
||||
virtual ~TargetJITInfo() {}
|
||||
|
||||
/// replaceMachineCodeForFunction - Make it so that calling the function
|
||||
/// whose machine code is at OLD turns into a call to NEW, perhaps by
|
||||
/// overwriting OLD with a branch to NEW. This is used for self-modifying
|
||||
/// code.
|
||||
///
|
||||
virtual void replaceMachineCodeForFunction(void *Old, void *New) = 0;
|
||||
|
||||
/// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object
|
||||
/// to emit an indirect symbol which contains the address of the specified
|
||||
/// ptr.
|
||||
virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
|
||||
JITCodeEmitter &JCE) {
|
||||
llvm_unreachable("This target doesn't implement "
|
||||
"emitGlobalValueIndirectSym!");
|
||||
}
|
||||
|
||||
/// Records the required size and alignment for a call stub in bytes.
|
||||
struct StubLayout {
|
||||
size_t Size;
|
||||
size_t Alignment;
|
||||
};
|
||||
/// Returns the maximum size and alignment for a call stub on this target.
|
||||
virtual StubLayout getStubLayout() {
|
||||
llvm_unreachable("This target doesn't implement getStubLayout!");
|
||||
}
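// Usage sketch from the JIT side (simplified; error handling and the exact
// emitter bookkeeping are omitted): query the layout so enough space can be
// reserved before the target emits the stub.
//
//   TargetJITInfo::StubLayout SL = TJI.getStubLayout();
//   // ... reserve SL.Size bytes aligned to SL.Alignment in the emitter ...
//   void *Stub = TJI.emitFunctionStub(F, Target, JCE);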
|
||||
|
||||
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
|
||||
/// small native function that simply calls the function at the specified
|
||||
/// address. The JITCodeEmitter must already have storage allocated for the
|
||||
/// stub. Return the address of the resultant function, which may have been
|
||||
/// aligned from the address the JCE was set up to emit at.
|
||||
virtual void *emitFunctionStub(const Function* F, void *Target,
|
||||
JITCodeEmitter &JCE) {
|
||||
llvm_unreachable("This target doesn't implement emitFunctionStub!");
|
||||
}
|
||||
|
||||
/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
|
||||
/// specific basic block.
|
||||
virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) {
|
||||
llvm_unreachable("This target doesn't implement getPICJumpTableEntry!");
|
||||
}
|
||||
|
||||
/// LazyResolverFn - This typedef is used to represent the function that
|
||||
/// unresolved call points should invoke. This is a target specific
|
||||
/// function that knows how to walk the stack and find out which stub the
|
||||
/// call is coming from.
|
||||
typedef void (*LazyResolverFn)();
|
||||
|
||||
/// JITCompilerFn - This typedef is used to represent the JIT function that
|
||||
/// lazily compiles the function corresponding to a stub. The JIT keeps
|
||||
/// track of the mapping between stubs and LLVM Functions, the target
|
||||
/// provides the ability to figure out the address of a stub that is called
|
||||
/// by the LazyResolverFn.
|
||||
typedef void* (*JITCompilerFn)(void *);
|
||||
|
||||
/// getLazyResolverFunction - This method is used to initialize the JIT,
|
||||
/// giving the target the function that should be used to compile a
|
||||
/// function, and giving the JIT the target function used to do the lazy
|
||||
/// resolving.
|
||||
virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn) {
|
||||
llvm_unreachable("Not implemented for this target!");
|
||||
}
|
||||
|
||||
/// relocate - Before the JIT can run a block of code that has been emitted,
|
||||
/// it must rewrite the code to contain the actual addresses of any
|
||||
/// referenced global symbols.
|
||||
virtual void relocate(void *Function, MachineRelocation *MR,
|
||||
unsigned NumRelocs, unsigned char* GOTBase) {
|
||||
assert(NumRelocs == 0 && "This target does not have relocations!");
|
||||
}
|
||||
|
||||
|
||||
/// allocateThreadLocalMemory - Each target has its own way of
|
||||
/// handling thread local variables. This method returns a value only
|
||||
/// meaningful to the target.
|
||||
virtual char* allocateThreadLocalMemory(size_t size) {
|
||||
llvm_unreachable("This target does not implement thread local storage!");
|
||||
}
|
||||
|
||||
/// needsGOT - Allows a target to specify that it would like the
|
||||
/// JIT to manage a GOT for it.
|
||||
bool needsGOT() const { return useGOT; }
|
||||
|
||||
/// hasCustomConstantPool - Allows a target to specify that constant
|
||||
/// pool address resolution is handled by the target.
|
||||
virtual bool hasCustomConstantPool() const { return false; }
|
||||
|
||||
/// hasCustomJumpTables - Allows a target to specify that jumptables
|
||||
/// are emitted by the target.
|
||||
virtual bool hasCustomJumpTables() const { return false; }
|
||||
|
||||
/// allocateSeparateGVMemory - If true, globals should be placed in
|
||||
/// separately allocated heap memory rather than in the same
|
||||
/// code memory allocated by JITCodeEmitter.
|
||||
virtual bool allocateSeparateGVMemory() const { return false; }
|
||||
protected:
|
||||
bool useGOT;
|
||||
};
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
730
thirdparty/clang/include/llvm/Target/TargetLibraryInfo.h
vendored
Normal file
@@ -0,0 +1,730 @@
|
||||
//===-- llvm/Target/TargetLibraryInfo.h - Library information ---*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETLIBRARYINFO_H
|
||||
#define LLVM_TARGET_TARGETLIBRARYINFO_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/Pass.h"
|
||||
|
||||
namespace llvm {
|
||||
class Triple;
|
||||
|
||||
namespace LibFunc {
|
||||
enum Func {
|
||||
/// int _IO_getc(_IO_FILE * __fp);
|
||||
under_IO_getc,
|
||||
/// int _IO_putc(int __c, _IO_FILE * __fp);
|
||||
under_IO_putc,
|
||||
/// void operator delete[](void*);
|
||||
ZdaPv,
|
||||
/// void operator delete(void*);
|
||||
ZdlPv,
|
||||
/// void *new[](unsigned int);
|
||||
Znaj,
|
||||
/// void *new[](unsigned int, nothrow);
|
||||
ZnajRKSt9nothrow_t,
|
||||
/// void *new[](unsigned long);
|
||||
Znam,
|
||||
/// void *new[](unsigned long, nothrow);
|
||||
ZnamRKSt9nothrow_t,
|
||||
/// void *new(unsigned int);
|
||||
Znwj,
|
||||
/// void *new(unsigned int, nothrow);
|
||||
ZnwjRKSt9nothrow_t,
|
||||
/// void *new(unsigned long);
|
||||
Znwm,
|
||||
/// void *new(unsigned long, nothrow);
|
||||
ZnwmRKSt9nothrow_t,
|
||||
/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
|
||||
cxa_atexit,
|
||||
/// void __cxa_guard_abort(guard_t *guard);
|
||||
/// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi.
|
||||
cxa_guard_abort,
|
||||
/// int __cxa_guard_acquire(guard_t *guard);
|
||||
cxa_guard_acquire,
|
||||
/// void __cxa_guard_release(guard_t *guard);
|
||||
cxa_guard_release,
|
||||
/// int __isoc99_scanf (const char *format, ...)
|
||||
dunder_isoc99_scanf,
|
||||
/// int __isoc99_sscanf(const char *s, const char *format, ...)
|
||||
dunder_isoc99_sscanf,
|
||||
/// void *__memcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
|
||||
memcpy_chk,
|
||||
/// char * __strdup(const char *s);
|
||||
dunder_strdup,
|
||||
/// char *__strndup(const char *s, size_t n);
|
||||
dunder_strndup,
|
||||
/// char * __strtok_r(char *s, const char *delim, char **save_ptr);
|
||||
dunder_strtok_r,
|
||||
/// int abs(int j);
|
||||
abs,
|
||||
/// int access(const char *path, int amode);
|
||||
access,
|
||||
/// double acos(double x);
|
||||
acos,
|
||||
/// float acosf(float x);
|
||||
acosf,
|
||||
/// double acosh(double x);
|
||||
acosh,
|
||||
/// float acoshf(float x);
|
||||
acoshf,
|
||||
/// long double acoshl(long double x);
|
||||
acoshl,
|
||||
/// long double acosl(long double x);
|
||||
acosl,
|
||||
/// double asin(double x);
|
||||
asin,
|
||||
/// float asinf(float x);
|
||||
asinf,
|
||||
/// double asinh(double x);
|
||||
asinh,
|
||||
/// float asinhf(float x);
|
||||
asinhf,
|
||||
/// long double asinhl(long double x);
|
||||
asinhl,
|
||||
/// long double asinl(long double x);
|
||||
asinl,
|
||||
/// double atan(double x);
|
||||
atan,
|
||||
/// double atan2(double y, double x);
|
||||
atan2,
|
||||
/// float atan2f(float y, float x);
|
||||
atan2f,
|
||||
/// long double atan2l(long double y, long double x);
|
||||
atan2l,
|
||||
/// float atanf(float x);
|
||||
atanf,
|
||||
/// double atanh(double x);
|
||||
atanh,
|
||||
/// float atanhf(float x);
|
||||
atanhf,
|
||||
/// long double atanhl(long double x);
|
||||
atanhl,
|
||||
/// long double atanl(long double x);
|
||||
atanl,
|
||||
/// double atof(const char *str);
|
||||
atof,
|
||||
/// int atoi(const char *str);
|
||||
atoi,
|
||||
/// long atol(const char *str);
|
||||
atol,
|
||||
/// long long atoll(const char *nptr);
|
||||
atoll,
|
||||
/// int bcmp(const void *s1, const void *s2, size_t n);
|
||||
bcmp,
|
||||
/// void bcopy(const void *s1, void *s2, size_t n);
|
||||
bcopy,
|
||||
/// void bzero(void *s, size_t n);
|
||||
bzero,
|
||||
/// void *calloc(size_t count, size_t size);
|
||||
calloc,
|
||||
/// double cbrt(double x);
|
||||
cbrt,
|
||||
/// float cbrtf(float x);
|
||||
cbrtf,
|
||||
/// long double cbrtl(long double x);
|
||||
cbrtl,
|
||||
/// double ceil(double x);
|
||||
ceil,
|
||||
/// float ceilf(float x);
|
||||
ceilf,
|
||||
/// long double ceill(long double x);
|
||||
ceill,
|
||||
/// int chmod(const char *path, mode_t mode);
|
||||
chmod,
|
||||
/// int chown(const char *path, uid_t owner, gid_t group);
|
||||
chown,
|
||||
/// void clearerr(FILE *stream);
|
||||
clearerr,
|
||||
/// int closedir(DIR *dirp);
|
||||
closedir,
|
||||
/// double copysign(double x, double y);
|
||||
copysign,
|
||||
/// float copysignf(float x, float y);
|
||||
copysignf,
|
||||
/// long double copysignl(long double x, long double y);
|
||||
copysignl,
|
||||
/// double cos(double x);
|
||||
cos,
|
||||
/// float cosf(float x);
|
||||
cosf,
|
||||
/// double cosh(double x);
|
||||
cosh,
|
||||
/// float coshf(float x);
|
||||
coshf,
|
||||
/// long double coshl(long double x);
|
||||
coshl,
|
||||
/// long double cosl(long double x);
|
||||
cosl,
|
||||
/// char *ctermid(char *s);
|
||||
ctermid,
|
||||
/// double exp(double x);
|
||||
exp,
|
||||
/// double exp10(double x);
|
||||
exp10,
|
||||
/// float exp10f(float x);
|
||||
exp10f,
|
||||
/// long double exp10l(long double x);
|
||||
exp10l,
|
||||
/// double exp2(double x);
|
||||
exp2,
|
||||
/// float exp2f(float x);
|
||||
exp2f,
|
||||
/// long double exp2l(long double x);
|
||||
exp2l,
|
||||
/// float expf(float x);
|
||||
expf,
|
||||
/// long double expl(long double x);
|
||||
expl,
|
||||
/// double expm1(double x);
|
||||
expm1,
|
||||
/// float expm1f(float x);
|
||||
expm1f,
|
||||
/// long double expm1l(long double x);
|
||||
expm1l,
|
||||
/// double fabs(double x);
|
||||
fabs,
|
||||
/// float fabsf(float x);
|
||||
fabsf,
|
||||
/// long double fabsl(long double x);
|
||||
fabsl,
|
||||
/// int fclose(FILE *stream);
|
||||
fclose,
|
||||
/// FILE *fdopen(int fildes, const char *mode);
|
||||
fdopen,
|
||||
/// int feof(FILE *stream);
|
||||
feof,
|
||||
/// int ferror(FILE *stream);
|
||||
ferror,
|
||||
/// int fflush(FILE *stream);
|
||||
fflush,
|
||||
/// int ffs(int i);
|
||||
ffs,
|
||||
/// int ffsl(long int i);
|
||||
ffsl,
|
||||
/// int ffsll(long long int i);
|
||||
ffsll,
|
||||
/// int fgetc(FILE *stream);
|
||||
fgetc,
|
||||
/// int fgetpos(FILE *stream, fpos_t *pos);
|
||||
fgetpos,
|
||||
/// char *fgets(char *s, int n, FILE *stream);
|
||||
fgets,
|
||||
/// int fileno(FILE *stream);
|
||||
fileno,
|
||||
/// int fiprintf(FILE *stream, const char *format, ...);
|
||||
fiprintf,
|
||||
/// void flockfile(FILE *file);
|
||||
flockfile,
|
||||
/// double floor(double x);
|
||||
floor,
|
||||
/// float floorf(float x);
|
||||
floorf,
|
||||
/// long double floorl(long double x);
|
||||
floorl,
|
||||
/// double fmod(double x, double y);
|
||||
fmod,
|
||||
/// float fmodf(float x, float y);
|
||||
fmodf,
|
||||
/// long double fmodl(long double x, long double y);
|
||||
fmodl,
|
||||
/// FILE *fopen(const char *filename, const char *mode);
|
||||
fopen,
|
||||
/// FILE *fopen64(const char *filename, const char *opentype)
|
||||
fopen64,
|
||||
/// int fprintf(FILE *stream, const char *format, ...);
|
||||
fprintf,
|
||||
/// int fputc(int c, FILE *stream);
|
||||
fputc,
|
||||
/// int fputs(const char *s, FILE *stream);
|
||||
fputs,
|
||||
/// size_t fread(void *ptr, size_t size, size_t nitems, FILE *stream);
|
||||
fread,
|
||||
/// void free(void *ptr);
|
||||
free,
|
||||
/// double frexp(double num, int *exp);
|
||||
frexp,
|
||||
/// float frexpf(float num, int *exp);
|
||||
frexpf,
|
||||
/// long double frexpl(long double num, int *exp);
|
||||
frexpl,
|
||||
/// int fscanf(FILE *stream, const char *format, ... );
|
||||
fscanf,
|
||||
/// int fseek(FILE *stream, long offset, int whence);
|
||||
fseek,
|
||||
/// int fseeko(FILE *stream, off_t offset, int whence);
|
||||
fseeko,
|
||||
/// int fseeko64(FILE *stream, off64_t offset, int whence)
|
||||
fseeko64,
|
||||
/// int fsetpos(FILE *stream, const fpos_t *pos);
|
||||
fsetpos,
|
||||
/// int fstat(int fildes, struct stat *buf);
|
||||
fstat,
|
||||
/// int fstat64(int filedes, struct stat64 *buf)
|
||||
fstat64,
|
||||
/// int fstatvfs(int fildes, struct statvfs *buf);
|
||||
fstatvfs,
|
||||
/// int fstatvfs64(int fildes, struct statvfs64 *buf);
|
||||
fstatvfs64,
|
||||
/// long ftell(FILE *stream);
|
||||
ftell,
|
||||
/// off_t ftello(FILE *stream);
|
||||
ftello,
|
||||
/// off64_t ftello64(FILE *stream)
|
||||
ftello64,
|
||||
/// int ftrylockfile(FILE *file);
|
||||
ftrylockfile,
|
||||
/// void funlockfile(FILE *file);
|
||||
funlockfile,
|
||||
/// size_t fwrite(const void *ptr, size_t size, size_t nitems,
|
||||
/// FILE *stream);
|
||||
fwrite,
|
||||
/// int getc(FILE *stream);
|
||||
getc,
|
||||
/// int getc_unlocked(FILE *stream);
|
||||
getc_unlocked,
|
||||
/// int getchar(void);
|
||||
getchar,
|
||||
/// char *getenv(const char *name);
|
||||
getenv,
|
||||
/// int getitimer(int which, struct itimerval *value);
|
||||
getitimer,
|
||||
/// int getlogin_r(char *name, size_t namesize);
|
||||
getlogin_r,
|
||||
/// struct passwd *getpwnam(const char *name);
|
||||
getpwnam,
|
||||
/// char *gets(char *s);
|
||||
gets,
|
||||
/// uint32_t htonl(uint32_t hostlong);
|
||||
htonl,
|
||||
/// uint16_t htons(uint16_t hostshort);
|
||||
htons,
|
||||
/// int iprintf(const char *format, ...);
|
||||
iprintf,
|
||||
/// int isascii(int c);
|
||||
isascii,
|
||||
/// int isdigit(int c);
|
||||
isdigit,
|
||||
/// long int labs(long int j);
|
||||
labs,
|
||||
/// int lchown(const char *path, uid_t owner, gid_t group);
|
||||
lchown,
|
||||
/// long long int llabs(long long int j);
|
||||
llabs,
|
||||
/// double log(double x);
|
||||
log,
|
||||
/// double log10(double x);
|
||||
log10,
|
||||
/// float log10f(float x);
|
||||
log10f,
|
||||
/// long double log10l(long double x);
|
||||
log10l,
|
||||
/// double log1p(double x);
|
||||
log1p,
|
||||
/// float log1pf(float x);
|
||||
log1pf,
|
||||
/// long double log1pl(long double x);
|
||||
log1pl,
|
||||
/// double log2(double x);
|
||||
log2,
|
||||
/// float log2f(float x);
|
||||
log2f,
|
||||
/// long double log2l(long double x);
|
||||
log2l,
|
||||
/// double logb(double x);
|
||||
logb,
|
||||
/// float logbf(float x);
|
||||
logbf,
|
||||
/// long double logbl(long double x);
|
||||
logbl,
|
||||
/// float logf(float x);
|
||||
logf,
|
||||
/// long double logl(long double x);
|
||||
logl,
|
||||
/// int lstat(const char *path, struct stat *buf);
|
||||
lstat,
|
||||
/// int lstat64(const char *path, struct stat64 *buf);
|
||||
lstat64,
|
||||
/// void *malloc(size_t size);
|
||||
malloc,
|
||||
/// void *memalign(size_t boundary, size_t size);
|
||||
memalign,
|
||||
/// void *memccpy(void *s1, const void *s2, int c, size_t n);
|
||||
memccpy,
|
||||
/// void *memchr(const void *s, int c, size_t n);
|
||||
memchr,
|
||||
/// int memcmp(const void *s1, const void *s2, size_t n);
|
||||
memcmp,
|
||||
/// void *memcpy(void *s1, const void *s2, size_t n);
|
||||
memcpy,
|
||||
/// void *memmove(void *s1, const void *s2, size_t n);
|
||||
memmove,
|
||||
// void *memrchr(const void *s, int c, size_t n);
|
||||
memrchr,
|
||||
/// void *memset(void *b, int c, size_t len);
|
||||
memset,
|
||||
/// void memset_pattern16(void *b, const void *pattern16, size_t len);
|
||||
memset_pattern16,
|
||||
/// int mkdir(const char *path, mode_t mode);
|
||||
mkdir,
|
||||
/// time_t mktime(struct tm *timeptr);
|
||||
mktime,
|
||||
/// double modf(double x, double *iptr);
|
||||
modf,
|
||||
/// float modff(float, float *iptr);
|
||||
modff,
|
||||
/// long double modfl(long double value, long double *iptr);
|
||||
modfl,
|
||||
/// double nearbyint(double x);
|
||||
nearbyint,
|
||||
/// float nearbyintf(float x);
|
||||
nearbyintf,
|
||||
/// long double nearbyintl(long double x);
|
||||
nearbyintl,
|
||||
/// uint32_t ntohl(uint32_t netlong);
|
||||
ntohl,
|
||||
/// uint16_t ntohs(uint16_t netshort);
|
||||
ntohs,
|
||||
/// int open(const char *path, int oflag, ... );
|
||||
open,
|
||||
/// int open64(const char *filename, int flags[, mode_t mode])
|
||||
open64,
|
||||
/// DIR *opendir(const char *dirname);
|
||||
opendir,
|
||||
/// int pclose(FILE *stream);
|
||||
pclose,
|
||||
/// void perror(const char *s);
|
||||
perror,
|
||||
/// FILE *popen(const char *command, const char *mode);
|
||||
popen,
|
||||
/// int posix_memalign(void **memptr, size_t alignment, size_t size);
|
||||
posix_memalign,
|
||||
/// double pow(double x, double y);
|
||||
pow,
|
||||
/// float powf(float x, float y);
|
||||
powf,
|
||||
/// long double powl(long double x, long double y);
|
||||
powl,
|
||||
/// ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset);
|
||||
pread,
|
||||
/// int printf(const char *format, ...);
|
||||
printf,
|
||||
/// int putc(int c, FILE *stream);
|
||||
putc,
|
||||
/// int putchar(int c);
|
||||
putchar,
|
||||
/// int puts(const char *s);
|
||||
puts,
|
||||
/// ssize_t pwrite(int fildes, const void *buf, size_t nbyte,
|
||||
/// off_t offset);
|
||||
pwrite,
|
||||
/// void qsort(void *base, size_t nel, size_t width,
|
||||
/// int (*compar)(const void *, const void *));
|
||||
qsort,
|
||||
/// ssize_t read(int fildes, void *buf, size_t nbyte);
|
||||
read,
|
||||
/// ssize_t readlink(const char *path, char *buf, size_t bufsize);
|
||||
readlink,
|
||||
/// void *realloc(void *ptr, size_t size);
|
||||
realloc,
|
||||
/// void *reallocf(void *ptr, size_t size);
|
||||
reallocf,
|
||||
/// char *realpath(const char *file_name, char *resolved_name);
|
||||
realpath,
|
||||
/// int remove(const char *path);
|
||||
remove,
|
||||
/// int rename(const char *old, const char *new);
|
||||
rename,
|
||||
/// void rewind(FILE *stream);
|
||||
rewind,
|
||||
/// double rint(double x);
|
||||
rint,
|
||||
/// float rintf(float x);
|
||||
rintf,
|
||||
/// long double rintl(long double x);
|
||||
rintl,
|
||||
/// int rmdir(const char *path);
|
||||
rmdir,
|
||||
/// double round(double x);
|
||||
round,
|
||||
/// float roundf(float x);
|
||||
roundf,
|
||||
/// long double roundl(long double x);
|
||||
roundl,
|
||||
/// int scanf(const char *restrict format, ... );
|
||||
scanf,
|
||||
/// void setbuf(FILE *stream, char *buf);
|
||||
setbuf,
|
||||
/// int setitimer(int which, const struct itimerval *value,
|
||||
/// struct itimerval *ovalue);
|
||||
setitimer,
|
||||
/// int setvbuf(FILE *stream, char *buf, int type, size_t size);
|
||||
setvbuf,
|
||||
/// double sin(double x);
|
||||
sin,
|
||||
/// float sinf(float x);
|
||||
sinf,
|
||||
/// double sinh(double x);
|
||||
sinh,
|
||||
/// float sinhf(float x);
|
||||
sinhf,
|
||||
/// long double sinhl(long double x);
|
||||
sinhl,
|
||||
/// long double sinl(long double x);
|
||||
sinl,
|
||||
/// int siprintf(char *str, const char *format, ...);
|
||||
siprintf,
|
||||
/// int snprintf(char *s, size_t n, const char *format, ...);
|
||||
snprintf,
|
||||
/// int sprintf(char *str, const char *format, ...);
|
||||
sprintf,
|
||||
/// double sqrt(double x);
|
||||
sqrt,
|
||||
/// float sqrtf(float x);
|
||||
sqrtf,
|
||||
/// long double sqrtl(long double x);
|
||||
sqrtl,
|
||||
/// int sscanf(const char *s, const char *format, ... );
|
||||
sscanf,
|
||||
/// int stat(const char *path, struct stat *buf);
|
||||
stat,
|
||||
/// int stat64(const char *path, struct stat64 *buf);
|
||||
stat64,
|
||||
/// int statvfs(const char *path, struct statvfs *buf);
|
||||
statvfs,
|
||||
/// int statvfs64(const char *path, struct statvfs64 *buf)
|
||||
statvfs64,
|
||||
/// char *stpcpy(char *s1, const char *s2);
|
||||
stpcpy,
|
||||
/// char *stpncpy(char *s1, const char *s2, size_t n);
|
||||
stpncpy,
|
||||
/// int strcasecmp(const char *s1, const char *s2);
|
||||
strcasecmp,
|
||||
/// char *strcat(char *s1, const char *s2);
|
||||
strcat,
|
||||
/// char *strchr(const char *s, int c);
|
||||
strchr,
|
||||
/// int strcmp(const char *s1, const char *s2);
|
||||
strcmp,
|
||||
/// int strcoll(const char *s1, const char *s2);
|
||||
strcoll,
|
||||
/// char *strcpy(char *s1, const char *s2);
|
||||
strcpy,
|
||||
/// size_t strcspn(const char *s1, const char *s2);
|
||||
strcspn,
|
||||
/// char *strdup(const char *s1);
|
||||
strdup,
|
||||
/// size_t strlen(const char *s);
|
||||
strlen,
|
||||
/// int strncasecmp(const char *s1, const char *s2, size_t n);
|
||||
strncasecmp,
|
||||
/// char *strncat(char *s1, const char *s2, size_t n);
|
||||
strncat,
|
||||
/// int strncmp(const char *s1, const char *s2, size_t n);
|
||||
strncmp,
|
||||
/// char *strncpy(char *s1, const char *s2, size_t n);
|
||||
strncpy,
|
||||
/// char *strndup(const char *s1, size_t n);
|
||||
strndup,
|
||||
/// size_t strnlen(const char *s, size_t maxlen);
|
||||
strnlen,
|
||||
/// char *strpbrk(const char *s1, const char *s2);
|
||||
strpbrk,
|
||||
/// char *strrchr(const char *s, int c);
|
||||
strrchr,
|
||||
/// size_t strspn(const char *s1, const char *s2);
|
||||
strspn,
|
||||
/// char *strstr(const char *s1, const char *s2);
|
||||
strstr,
|
||||
/// double strtod(const char *nptr, char **endptr);
|
||||
strtod,
|
||||
/// float strtof(const char *nptr, char **endptr);
|
||||
strtof,
|
||||
// char *strtok(char *s1, const char *s2);
|
||||
strtok,
|
||||
// char *strtok_r(char *s, const char *sep, char **lasts);
|
||||
strtok_r,
|
||||
/// long int strtol(const char *nptr, char **endptr, int base);
|
||||
strtol,
|
||||
/// long double strtold(const char *nptr, char **endptr);
|
||||
strtold,
|
||||
/// long long int strtoll(const char *nptr, char **endptr, int base);
|
||||
strtoll,
|
||||
/// unsigned long int strtoul(const char *nptr, char **endptr, int base);
|
||||
strtoul,
|
||||
/// unsigned long long int strtoull(const char *nptr, char **endptr,
|
||||
/// int base);
|
||||
strtoull,
|
||||
/// size_t strxfrm(char *s1, const char *s2, size_t n);
|
||||
strxfrm,
|
||||
/// int system(const char *command);
|
||||
system,
|
||||
/// double tan(double x);
|
||||
tan,
|
||||
/// float tanf(float x);
|
||||
tanf,
|
||||
/// double tanh(double x);
|
||||
tanh,
|
||||
/// float tanhf(float x);
|
||||
tanhf,
|
||||
/// long double tanhl(long double x);
|
||||
tanhl,
|
||||
/// long double tanl(long double x);
|
||||
tanl,
|
||||
/// clock_t times(struct tms *buffer);
|
||||
times,
|
||||
/// FILE *tmpfile(void);
|
||||
tmpfile,
|
||||
/// FILE *tmpfile64(void)
|
||||
tmpfile64,
|
||||
/// int toascii(int c);
|
||||
toascii,
|
||||
/// double trunc(double x);
|
||||
trunc,
|
||||
/// float truncf(float x);
|
||||
truncf,
|
||||
/// long double truncl(long double x);
|
||||
truncl,
|
||||
/// int uname(struct utsname *name);
|
||||
uname,
|
||||
/// int ungetc(int c, FILE *stream);
|
||||
ungetc,
|
||||
/// int unlink(const char *path);
|
||||
unlink,
|
||||
/// int unsetenv(const char *name);
|
||||
unsetenv,
|
||||
/// int utime(const char *path, const struct utimbuf *times);
|
||||
utime,
|
||||
/// int utimes(const char *path, const struct timeval times[2]);
|
||||
utimes,
|
||||
/// void *valloc(size_t size);
|
||||
valloc,
|
||||
/// int vfprintf(FILE *stream, const char *format, va_list ap);
|
||||
vfprintf,
|
||||
/// int vfscanf(FILE *stream, const char *format, va_list arg);
|
||||
vfscanf,
|
||||
/// int vprintf(const char *restrict format, va_list ap);
|
||||
vprintf,
|
||||
/// int vscanf(const char *format, va_list arg);
|
||||
vscanf,
|
||||
/// int vsnprintf(char *s, size_t n, const char *format, va_list ap);
|
||||
vsnprintf,
|
||||
/// int vsprintf(char *s, const char *format, va_list ap);
|
||||
vsprintf,
|
||||
/// int vsscanf(const char *s, const char *format, va_list arg);
|
||||
vsscanf,
|
||||
/// ssize_t write(int fildes, const void *buf, size_t nbyte);
|
||||
write,
|
||||
|
||||
NumLibFuncs
|
||||
};
|
||||
}
|
||||
|
||||
/// TargetLibraryInfo - This immutable pass captures information about what
|
||||
/// library functions are available for the current target, and allows a
|
||||
/// frontend to disable optimizations through -fno-builtin etc.
|
||||
class TargetLibraryInfo : public ImmutablePass {
|
||||
virtual void anchor();
|
||||
unsigned char AvailableArray[(LibFunc::NumLibFuncs+3)/4];
|
||||
llvm::DenseMap<unsigned, std::string> CustomNames;
|
||||
static const char* StandardNames[LibFunc::NumLibFuncs];
|
||||
|
||||
enum AvailabilityState {
|
||||
StandardName = 3, // (memset to all ones)
|
||||
CustomName = 1,
|
||||
Unavailable = 0 // (memset to all zeros)
|
||||
};
|
||||
void setState(LibFunc::Func F, AvailabilityState State) {
|
||||
AvailableArray[F/4] &= ~(3 << 2*(F&3));
|
||||
AvailableArray[F/4] |= State << 2*(F&3);
|
||||
}
|
||||
AvailabilityState getState(LibFunc::Func F) const {
|
||||
return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
|
||||
}
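// Worked example of the packing above: each function gets 2 bits, so four
// functions share one byte. For F == 6 the state lives in AvailableArray[1]
// (6/4) at bit offset 4 (2*(6&3)); setState clears those two bits and then
// ORs in the new state. StandardName is binary 11, which is why memset'ing
// the array to all ones marks every function available under its standard name.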
|
||||
|
||||
public:
|
||||
static char ID;
|
||||
TargetLibraryInfo();
|
||||
TargetLibraryInfo(const Triple &T);
|
||||
explicit TargetLibraryInfo(const TargetLibraryInfo &TLI);
|
||||
|
||||
/// getLibFunc - Search for a particular function name. If it is one of the
|
||||
/// known library functions, return true and set F to the corresponding value.
|
||||
bool getLibFunc(StringRef funcName, LibFunc::Func &F) const;
|
||||
|
||||
/// has - This function is used by optimizations that want to match on or form
|
||||
/// a given library function.
|
||||
bool has(LibFunc::Func F) const {
|
||||
return getState(F) != Unavailable;
|
||||
}
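// A minimal usage sketch from an optimization pass (the surrounding pass
// boilerplate and the "Callee" value are assumed):
//
//   const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
//   LibFunc::Func F;
//   if (TLI->getLibFunc(Callee->getName(), F) && TLI->has(F) &&
//       F == LibFunc::memcpy) {
//     // Safe to reason about the call as the standard C memcpy.
//   }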
|
||||
|
||||
/// hasOptimizedCodeGen - Return true if the function is both available as
|
||||
/// a builtin and a candidate for optimized code generation.
|
||||
bool hasOptimizedCodeGen(LibFunc::Func F) const {
|
||||
if (getState(F) == Unavailable)
|
||||
return false;
|
||||
switch (F) {
|
||||
default: break;
|
||||
case LibFunc::copysign: case LibFunc::copysignf: case LibFunc::copysignl:
|
||||
case LibFunc::fabs: case LibFunc::fabsf: case LibFunc::fabsl:
|
||||
case LibFunc::sin: case LibFunc::sinf: case LibFunc::sinl:
|
||||
case LibFunc::cos: case LibFunc::cosf: case LibFunc::cosl:
|
||||
case LibFunc::sqrt: case LibFunc::sqrtf: case LibFunc::sqrtl:
|
||||
case LibFunc::floor: case LibFunc::floorf: case LibFunc::floorl:
|
||||
case LibFunc::nearbyint: case LibFunc::nearbyintf: case LibFunc::nearbyintl:
|
||||
case LibFunc::ceil: case LibFunc::ceilf: case LibFunc::ceill:
|
||||
case LibFunc::rint: case LibFunc::rintf: case LibFunc::rintl:
|
||||
case LibFunc::trunc: case LibFunc::truncf: case LibFunc::truncl:
|
||||
case LibFunc::log2: case LibFunc::log2f: case LibFunc::log2l:
|
||||
case LibFunc::exp2: case LibFunc::exp2f: case LibFunc::exp2l:
|
||||
case LibFunc::memcmp:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
StringRef getName(LibFunc::Func F) const {
|
||||
AvailabilityState State = getState(F);
|
||||
if (State == Unavailable)
|
||||
return StringRef();
|
||||
if (State == StandardName)
|
||||
return StandardNames[F];
|
||||
assert(State == CustomName);
|
||||
return CustomNames.find(F)->second;
|
||||
}
|
||||
|
||||
/// setUnavailable - this can be used by whatever sets up TargetLibraryInfo to
|
||||
/// ban use of specific library functions.
|
||||
void setUnavailable(LibFunc::Func F) {
|
||||
setState(F, Unavailable);
|
||||
}
|
||||
|
||||
void setAvailable(LibFunc::Func F) {
|
||||
setState(F, StandardName);
|
||||
}
|
||||
|
||||
void setAvailableWithName(LibFunc::Func F, StringRef Name) {
|
||||
if (StandardNames[F] != Name) {
|
||||
setState(F, CustomName);
|
||||
CustomNames[F] = Name;
|
||||
assert(CustomNames.find(F) != CustomNames.end());
|
||||
} else {
|
||||
setState(F, StandardName);
|
||||
}
|
||||
}
|
||||
|
||||
/// disableAllFunctions - This disables all builtins, which is used for
|
||||
/// options like -fno-builtin.
|
||||
void disableAllFunctions();
|
||||
};
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
2275
thirdparty/clang/include/llvm/Target/TargetLowering.h
vendored
Normal file
File diff suppressed because it is too large
149
thirdparty/clang/include/llvm/Target/TargetLoweringObjectFile.h
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
//===-- llvm/Target/TargetLoweringObjectFile.h - Object Info ----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements classes used to handle lowerings specific to common
|
||||
// object file formats.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
|
||||
#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/IR/Module.h"
|
||||
#include "llvm/MC/MCObjectFileInfo.h"
|
||||
#include "llvm/MC/SectionKind.h"
|
||||
|
||||
namespace llvm {
|
||||
class MachineModuleInfo;
|
||||
class Mangler;
|
||||
class MCContext;
|
||||
class MCExpr;
|
||||
class MCSection;
|
||||
class MCSymbol;
|
||||
class MCSymbolRefExpr;
|
||||
class MCStreamer;
|
||||
class GlobalValue;
|
||||
class TargetMachine;
|
||||
|
||||
class TargetLoweringObjectFile : public MCObjectFileInfo {
|
||||
MCContext *Ctx;
|
||||
|
||||
TargetLoweringObjectFile(
|
||||
const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
|
||||
void operator=(const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
|
||||
|
||||
public:
|
||||
MCContext &getContext() const { return *Ctx; }
|
||||
|
||||
TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(0) {}
|
||||
|
||||
virtual ~TargetLoweringObjectFile();
|
||||
|
||||
/// Initialize - this method must be called before any actual lowering is
|
||||
/// done. This specifies the current context for codegen, and gives the
|
||||
/// lowering implementations a chance to set up their default sections.
|
||||
virtual void Initialize(MCContext &ctx, const TargetMachine &TM);
|
||||
|
||||
virtual void emitPersonalityValue(MCStreamer &Streamer,
|
||||
const TargetMachine &TM,
|
||||
const MCSymbol *Sym) const;
|
||||
|
||||
/// emitModuleFlags - Emit the module flags that the platform cares about.
|
||||
virtual void emitModuleFlags(MCStreamer &,
|
||||
ArrayRef<Module::ModuleFlagEntry>,
|
||||
Mangler *, const TargetMachine &) const {
|
||||
}
|
||||
|
||||
/// shouldEmitUsedDirectiveFor - This hook allows targets to selectively
|
||||
/// decide not to emit the UsedDirective for some symbols in llvm.used.
|
||||
/// FIXME: REMOVE this (rdar://7071300)
|
||||
virtual bool shouldEmitUsedDirectiveFor(const GlobalValue *GV,
|
||||
Mangler *) const {
|
||||
return GV != 0;
|
||||
}
|
||||
|
||||
/// getSectionForConstant - Given a constant with the SectionKind, return a
|
||||
/// section that it should be placed in.
|
||||
virtual const MCSection *getSectionForConstant(SectionKind Kind) const;
|
||||
|
||||
/// getKindForGlobal - Classify the specified global variable into a set of
|
||||
/// target independent categories embodied in SectionKind.
|
||||
static SectionKind getKindForGlobal(const GlobalValue *GV,
|
||||
const TargetMachine &TM);
|
||||
|
||||
/// SectionForGlobal - This method computes the appropriate section to emit
|
||||
/// the specified global variable or function definition. This should not
|
||||
/// be passed external (or available externally) globals.
|
||||
const MCSection *SectionForGlobal(const GlobalValue *GV,
|
||||
SectionKind Kind, Mangler *Mang,
|
||||
const TargetMachine &TM) const;
|
||||
|
||||
/// SectionForGlobal - This method computes the appropriate section to emit
|
||||
/// the specified global variable or function definition. This should not
|
||||
/// be passed external (or available externally) globals.
|
||||
const MCSection *SectionForGlobal(const GlobalValue *GV,
|
||||
Mangler *Mang,
|
||||
const TargetMachine &TM) const {
|
||||
return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
|
||||
}
|
||||
|
||||
/// getExplicitSectionGlobal - Targets should implement this method to assign
|
||||
/// a section to globals with an explicit section specified. The
|
||||
/// implementation of this method can assume that GV->hasSection() is true.
|
||||
virtual const MCSection *
|
||||
getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
|
||||
Mangler *Mang, const TargetMachine &TM) const = 0;
|
||||
|
||||
/// getSpecialCasedSectionGlobals - Allow the target to completely override
|
||||
/// section assignment of a global.
|
||||
virtual const MCSection *
|
||||
getSpecialCasedSectionGlobals(const GlobalValue *GV, Mangler *Mang,
|
||||
SectionKind Kind) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// getTTypeGlobalReference - Return an MCExpr to use for a reference
|
||||
/// to the specified global variable from exception handling information.
|
||||
///
|
||||
virtual const MCExpr *
|
||||
getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
|
||||
MachineModuleInfo *MMI, unsigned Encoding,
|
||||
MCStreamer &Streamer) const;
|
||||
|
||||
// getCFIPersonalitySymbol - The symbol that gets passed to .cfi_personality.
|
||||
virtual MCSymbol *
|
||||
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
|
||||
MachineModuleInfo *MMI) const;
|
||||
|
||||
///
|
||||
const MCExpr *
|
||||
getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
|
||||
MCStreamer &Streamer) const;
|
||||
|
||||
virtual const MCSection *
|
||||
getStaticCtorSection(unsigned Priority = 65535) const {
|
||||
(void)Priority;
|
||||
return StaticCtorSection;
|
||||
}
|
||||
virtual const MCSection *
|
||||
getStaticDtorSection(unsigned Priority = 65535) const {
|
||||
(void)Priority;
|
||||
return StaticDtorSection;
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual const MCSection *
|
||||
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
|
||||
Mangler *Mang, const TargetMachine &TM) const;
|
||||
};
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
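A minimal usage sketch (not part of this commit) of the section-selection hooks declared above. It assumes the enclosing class is LLVM's TargetLoweringObjectFile, with the Mangler and TargetMachine supplied by the caller:

#include "llvm/Target/TargetLoweringObjectFile.h"

// Classify a defined global into a SectionKind, then ask for its section.
const llvm::MCSection *placeGlobal(const llvm::TargetLoweringObjectFile &TLOF,
                                   const llvm::GlobalValue *GV,
                                   llvm::Mangler *Mang,
                                   const llvm::TargetMachine &TM) {
  llvm::SectionKind Kind =
      llvm::TargetLoweringObjectFile::getKindForGlobal(GV, TM);
  return TLOF.SectionForGlobal(GV, Kind, Mang, TM);
}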
|
||||
345
thirdparty/clang/include/llvm/Target/TargetMachine.h
vendored
Normal file
@@ -0,0 +1,345 @@
|
||||
//===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the TargetMachine and LLVMTargetMachine classes.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETMACHINE_H
|
||||
#define LLVM_TARGET_TARGETMACHINE_H
|
||||
|
||||
#include "llvm/ADT/StringRef.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include "llvm/Support/CodeGen.h"
|
||||
#include "llvm/Target/TargetOptions.h"
|
||||
#include <cassert>
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class InstrItineraryData;
|
||||
class JITCodeEmitter;
|
||||
class GlobalValue;
|
||||
class MCAsmInfo;
|
||||
class MCCodeGenInfo;
|
||||
class MCContext;
|
||||
class PassManagerBase;
|
||||
class Target;
|
||||
class DataLayout;
|
||||
class TargetFrameLowering;
|
||||
class TargetInstrInfo;
|
||||
class TargetIntrinsicInfo;
|
||||
class TargetJITInfo;
|
||||
class TargetLowering;
|
||||
class TargetPassConfig;
|
||||
class TargetRegisterInfo;
|
||||
class TargetSelectionDAGInfo;
|
||||
class TargetSubtargetInfo;
|
||||
class ScalarTargetTransformInfo;
|
||||
class VectorTargetTransformInfo;
|
||||
class formatted_raw_ostream;
|
||||
class raw_ostream;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// TargetMachine - Primary interface to the complete machine description for
|
||||
/// the target machine. All target-specific information should be accessible
|
||||
/// through this interface.
|
||||
///
|
||||
class TargetMachine {
|
||||
TargetMachine(const TargetMachine &) LLVM_DELETED_FUNCTION;
|
||||
void operator=(const TargetMachine &) LLVM_DELETED_FUNCTION;
|
||||
protected: // Can only create subclasses.
|
||||
TargetMachine(const Target &T, StringRef TargetTriple,
|
||||
StringRef CPU, StringRef FS, const TargetOptions &Options);
|
||||
|
||||
/// TheTarget - The Target that this machine was created for.
|
||||
const Target &TheTarget;
|
||||
|
||||
/// TargetTriple, TargetCPU, TargetFS - Triple string, CPU name, and target
|
||||
/// feature strings the TargetMachine instance is created with.
|
||||
std::string TargetTriple;
|
||||
std::string TargetCPU;
|
||||
std::string TargetFS;
|
||||
|
||||
/// CodeGenInfo - Low level target information such as relocation model.
|
||||
const MCCodeGenInfo *CodeGenInfo;
|
||||
|
||||
/// AsmInfo - Contains target specific asm information.
|
||||
///
|
||||
const MCAsmInfo *AsmInfo;
|
||||
|
||||
unsigned MCRelaxAll : 1;
|
||||
unsigned MCNoExecStack : 1;
|
||||
unsigned MCSaveTempLabels : 1;
|
||||
unsigned MCUseLoc : 1;
|
||||
unsigned MCUseCFI : 1;
|
||||
unsigned MCUseDwarfDirectory : 1;
|
||||
|
||||
public:
|
||||
virtual ~TargetMachine();
|
||||
|
||||
const Target &getTarget() const { return TheTarget; }
|
||||
|
||||
const StringRef getTargetTriple() const { return TargetTriple; }
|
||||
const StringRef getTargetCPU() const { return TargetCPU; }
|
||||
const StringRef getTargetFeatureString() const { return TargetFS; }
|
||||
|
||||
/// getSubtargetImpl - virtual method implemented by subclasses that returns
|
||||
/// a pointer to that target's TargetSubtargetInfo-derived member variable.
|
||||
virtual const TargetSubtargetInfo *getSubtargetImpl() const { return 0; }
|
||||
|
||||
mutable TargetOptions Options;
|
||||
|
||||
/// \brief Reset the target options based on the function's attributes.
|
||||
void resetTargetOptions(const MachineFunction *MF) const;
|
||||
|
||||
// Interfaces to the major aspects of target machine information:
|
||||
// -- Instruction opcode and operand information
|
||||
// -- Pipelines and scheduling information
|
||||
// -- Stack frame information
|
||||
// -- Selection DAG lowering information
|
||||
//
|
||||
virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
|
||||
virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
|
||||
virtual const TargetLowering *getTargetLowering() const { return 0; }
|
||||
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
|
||||
virtual const DataLayout *getDataLayout() const { return 0; }
|
||||
|
||||
/// getMCAsmInfo - Return target specific asm information.
|
||||
///
|
||||
const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
|
||||
|
||||
/// getSubtarget - This method returns a pointer to the specified type of
|
||||
/// TargetSubtargetInfo. In debug builds, it verifies that the object being
|
||||
/// returned is of the correct type.
|
||||
template<typename STC> const STC &getSubtarget() const {
|
||||
return *static_cast<const STC*>(getSubtargetImpl());
|
||||
}
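// Illustrative only (the subtarget type is target-specific and not defined in
// this commit): a backend typically writes
//   const MySubtarget &ST = TM.getSubtarget<MySubtarget>();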
|
||||
|
||||
/// getRegisterInfo - If register information is available, return it. If
|
||||
/// not, return null. This is kept separate from RegInfo until RegInfo has
|
||||
/// details of graph coloring register allocation removed from it.
|
||||
///
|
||||
virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; }
|
||||
|
||||
/// getIntrinsicInfo - If intrinsic information is available, return it. If
|
||||
/// not, return null.
|
||||
///
|
||||
virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return 0; }
|
||||
|
||||
/// getJITInfo - If this target supports a JIT, return information for it,
|
||||
/// otherwise return null.
|
||||
///
|
||||
virtual TargetJITInfo *getJITInfo() { return 0; }
|
||||
|
||||
/// getInstrItineraryData - Returns instruction itinerary data for the target
|
||||
/// or specific subtarget.
|
||||
///
|
||||
virtual const InstrItineraryData *getInstrItineraryData() const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// hasMCRelaxAll - Check whether all machine code instructions should be
|
||||
/// relaxed.
|
||||
bool hasMCRelaxAll() const { return MCRelaxAll; }
|
||||
|
||||
/// setMCRelaxAll - Set whether all machine code instructions should be
|
||||
/// relaxed.
|
||||
void setMCRelaxAll(bool Value) { MCRelaxAll = Value; }
|
||||
|
||||
/// hasMCSaveTempLabels - Check whether temporary labels will be preserved
|
||||
/// (i.e., not treated as temporary).
|
||||
bool hasMCSaveTempLabels() const { return MCSaveTempLabels; }
|
||||
|
||||
/// setMCSaveTempLabels - Set whether temporary labels will be preserved
|
||||
/// (i.e., not treated as temporary).
|
||||
void setMCSaveTempLabels(bool Value) { MCSaveTempLabels = Value; }
|
||||
|
||||
/// hasMCNoExecStack - Check whether an executable stack is not needed.
|
||||
bool hasMCNoExecStack() const { return MCNoExecStack; }
|
||||
|
||||
/// setMCNoExecStack - Set whether an executable stack is not needed.
|
||||
void setMCNoExecStack(bool Value) { MCNoExecStack = Value; }
|
||||
|
||||
/// hasMCUseLoc - Check whether we should use dwarf's .loc directive.
|
||||
bool hasMCUseLoc() const { return MCUseLoc; }
|
||||
|
||||
/// setMCUseLoc - Set whether we should use dwarf's .loc directive.
|
||||
void setMCUseLoc(bool Value) { MCUseLoc = Value; }
|
||||
|
||||
/// hasMCUseCFI - Check whether we should use dwarf's .cfi_* directives.
|
||||
bool hasMCUseCFI() const { return MCUseCFI; }
|
||||
|
||||
/// setMCUseCFI - Set whether we should use dwarf's .cfi_* directives.
|
||||
void setMCUseCFI(bool Value) { MCUseCFI = Value; }
|
||||
|
||||
/// hasMCUseDwarfDirectory - Check whether we should use .file directives with
|
||||
/// explicit directories.
|
||||
bool hasMCUseDwarfDirectory() const { return MCUseDwarfDirectory; }
|
||||
|
||||
/// setMCUseDwarfDirectory - Set whether we should use .file directives
|
||||
/// with explicit directories.
|
||||
void setMCUseDwarfDirectory(bool Value) { MCUseDwarfDirectory = Value; }
|
||||
|
||||
/// getRelocationModel - Returns the code generation relocation model. The
|
||||
/// choices are static, PIC, dynamic-no-pic, and target default.
|
||||
Reloc::Model getRelocationModel() const;
|
||||
|
||||
/// getCodeModel - Returns the code model. The choices are small, kernel,
|
||||
/// medium, large, and target default.
|
||||
CodeModel::Model getCodeModel() const;
|
||||
|
||||
/// getTLSModel - Returns the TLS model which should be used for the given
|
||||
/// global variable.
|
||||
TLSModel::Model getTLSModel(const GlobalValue *GV) const;
|
||||
|
||||
/// getOptLevel - Returns the optimization level: None, Less,
|
||||
/// Default, or Aggressive.
|
||||
CodeGenOpt::Level getOptLevel() const;
|
||||
|
||||
void setFastISel(bool Enable) { Options.EnableFastISel = Enable; }
|
||||
|
||||
bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
|
||||
|
||||
/// getAsmVerbosityDefault - Returns the default value of asm verbosity.
|
||||
///
|
||||
static bool getAsmVerbosityDefault();
|
||||
|
||||
/// setAsmVerbosityDefault - Set the default value of asm verbosity. Default
|
||||
/// is false.
|
||||
static void setAsmVerbosityDefault(bool);
|
||||
|
||||
/// getDataSections - Return true if data objects should be emitted into their
|
||||
/// own section, corresponding to -fdata-sections.
|
||||
static bool getDataSections();
|
||||
|
||||
/// getFunctionSections - Return true if functions should be emitted into
|
||||
/// their own section, corresponding to -ffunction-sections.
|
||||
static bool getFunctionSections();
|
||||
|
||||
/// setDataSections - Set whether data objects are emitted into separate sections.
|
||||
static void setDataSections(bool);
|
||||
|
||||
/// setFunctionSections - Set whether functions are emitted into separate
|
||||
/// sections.
|
||||
static void setFunctionSections(bool);
|
||||
|
||||
/// \brief Register analysis passes for this target with a pass manager.
|
||||
virtual void addAnalysisPasses(PassManagerBase &) {}
|
||||
|
||||
/// CodeGenFileType - These enums are meant to be passed into
|
||||
/// addPassesToEmitFile to indicate what type of file to emit, and returned by
|
||||
/// it to indicate what type of file could actually be made.
|
||||
enum CodeGenFileType {
|
||||
CGFT_AssemblyFile,
|
||||
CGFT_ObjectFile,
|
||||
CGFT_Null // Do not emit any output.
|
||||
};
|
||||
|
||||
/// addPassesToEmitFile - Add passes to the specified pass manager to get the
|
||||
/// specified file emitted. Typically this will involve several steps of code
|
||||
/// generation. This method should return true if emission of this file type
|
||||
/// is not supported, or false on success.
|
||||
virtual bool addPassesToEmitFile(PassManagerBase &,
|
||||
formatted_raw_ostream &,
|
||||
CodeGenFileType,
|
||||
bool /*DisableVerify*/ = true,
|
||||
AnalysisID StartAfter = 0,
|
||||
AnalysisID StopAfter = 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
|
||||
/// get machine code emitted. This uses a JITCodeEmitter object to handle
|
||||
/// actually outputting the machine code and resolving things like the address
|
||||
/// of functions. This method returns true if machine code emission is
|
||||
/// not supported.
|
||||
///
|
||||
virtual bool addPassesToEmitMachineCode(PassManagerBase &,
|
||||
JITCodeEmitter &,
|
||||
bool /*DisableVerify*/ = true) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// addPassesToEmitMC - Add passes to the specified pass manager to get
|
||||
/// machine code emitted with the MCJIT. This method returns true if machine
|
||||
/// code is not supported. It fills the MCContext Ctx pointer which can be
|
||||
/// used to build custom MCStreamer.
|
||||
///
|
||||
virtual bool addPassesToEmitMC(PassManagerBase &,
|
||||
MCContext *&,
|
||||
raw_ostream &,
|
||||
bool /*DisableVerify*/ = true) {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
/// LLVMTargetMachine - This class describes a target machine that is
|
||||
/// implemented with the LLVM target-independent code generator.
|
||||
///
|
||||
class LLVMTargetMachine : public TargetMachine {
|
||||
protected: // Can only create subclasses.
|
||||
LLVMTargetMachine(const Target &T, StringRef TargetTriple,
|
||||
StringRef CPU, StringRef FS, TargetOptions Options,
|
||||
Reloc::Model RM, CodeModel::Model CM,
|
||||
CodeGenOpt::Level OL);
|
||||
|
||||
public:
|
||||
/// \brief Register analysis passes for this target with a pass manager.
|
||||
///
|
||||
/// This registers target independent analysis passes.
|
||||
virtual void addAnalysisPasses(PassManagerBase &PM);
|
||||
|
||||
/// createPassConfig - Create a pass configuration object to be used by
|
||||
/// addPassToEmitX methods for generating a pipeline of CodeGen passes.
|
||||
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
|
||||
|
||||
/// addPassesToEmitFile - Add passes to the specified pass manager to get the
|
||||
/// specified file emitted. Typically this will involve several steps of code
|
||||
/// generation.
|
||||
virtual bool addPassesToEmitFile(PassManagerBase &PM,
|
||||
formatted_raw_ostream &Out,
|
||||
CodeGenFileType FileType,
|
||||
bool DisableVerify = true,
|
||||
AnalysisID StartAfter = 0,
|
||||
AnalysisID StopAfter = 0);
|
||||
|
||||
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
|
||||
/// get machine code emitted. This uses a JITCodeEmitter object to handle
|
||||
/// actually outputting the machine code and resolving things like the address
|
||||
/// of functions. This method returns true if machine code emission is
|
||||
/// not supported.
|
||||
///
|
||||
virtual bool addPassesToEmitMachineCode(PassManagerBase &PM,
|
||||
JITCodeEmitter &MCE,
|
||||
bool DisableVerify = true);
|
||||
|
||||
/// addPassesToEmitMC - Add passes to the specified pass manager to get
|
||||
/// machine code emitted with the MCJIT. This method returns true if machine
|
||||
/// code is not supported. It fills the MCContext Ctx pointer which can be
|
||||
/// used to build custom MCStreamer.
|
||||
///
|
||||
virtual bool addPassesToEmitMC(PassManagerBase &PM,
|
||||
MCContext *&Ctx,
|
||||
raw_ostream &OS,
|
||||
bool DisableVerify = true);
|
||||
|
||||
/// addCodeEmitter - This method should be overridden by the target to add a
|
||||
/// code emitter, if supported. If this is not supported, 'true' should be
|
||||
/// returned.
|
||||
virtual bool addCodeEmitter(PassManagerBase &,
|
||||
JITCodeEmitter &) {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
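A hedged sketch (not from this commit) of driver-side use of the interface above; constructing the TargetMachine, the pass manager, and the output stream is assumed to happen elsewhere:

#include "llvm/Target/TargetMachine.h"

// Ask TM to populate PM with the passes needed to print assembly to Out.
// addPassesToEmitFile returns true when the requested file type is not
// supported, so this helper returns true only on success.
bool addAsmEmissionPasses(llvm::TargetMachine &TM,
                          llvm::PassManagerBase &PM,
                          llvm::formatted_raw_ostream &Out) {
  return !TM.addPassesToEmitFile(PM, Out,
                                 llvm::TargetMachine::CGFT_AssemblyFile);
}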
|
||||
99
thirdparty/clang/include/llvm/Target/TargetOpcodes.h
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
//===-- llvm/Target/TargetOpcodes.h - Target Indep Opcodes ------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the target independent instruction opcodes.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETOPCODES_H
|
||||
#define LLVM_TARGET_TARGETOPCODES_H
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// Invariant opcodes: All instruction sets have these as their low opcodes.
|
||||
///
|
||||
/// Every instruction defined here must also appear in Target.td and the order
|
||||
/// must be the same as in CodeGenTarget.cpp.
|
||||
///
|
||||
namespace TargetOpcode {
|
||||
enum {
|
||||
PHI = 0,
|
||||
INLINEASM = 1,
|
||||
PROLOG_LABEL = 2,
|
||||
EH_LABEL = 3,
|
||||
GC_LABEL = 4,
|
||||
|
||||
/// KILL - This instruction is a noop that is used only to adjust the
|
||||
/// liveness of registers. This can be useful when dealing with
|
||||
/// sub-registers.
|
||||
KILL = 5,
|
||||
|
||||
/// EXTRACT_SUBREG - This instruction takes two operands: a register
|
||||
/// that has subregisters, and a subregister index. It returns the
|
||||
/// extracted subregister value. This is commonly used to implement
|
||||
/// truncation operations on target architectures which support it.
|
||||
EXTRACT_SUBREG = 6,
|
||||
|
||||
/// INSERT_SUBREG - This instruction takes three operands: a register that
|
||||
/// has subregisters, a register providing an insert value, and a
|
||||
/// subregister index. It returns the value of the first register with the
|
||||
/// value of the second register inserted. The first register is often
|
||||
/// defined by an IMPLICIT_DEF, because it is commonly used to implement
|
||||
/// anyext operations on target architectures which support it.
|
||||
INSERT_SUBREG = 7,
|
||||
|
||||
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
|
||||
IMPLICIT_DEF = 8,
|
||||
|
||||
/// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
|
||||
/// the first operand is an immediate integer constant. This constant is
|
||||
/// often zero, because it is commonly used to assert that the instruction
|
||||
/// defining the register implicitly clears the high bits.
|
||||
SUBREG_TO_REG = 9,
|
||||
|
||||
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
|
||||
/// register-to-register copy into a specific register class. This is only
|
||||
/// used between instruction selection and MachineInstr creation, before
|
||||
/// virtual registers have been created for all the instructions, and it's
|
||||
/// only needed in cases where the register classes implied by the
|
||||
/// instructions are insufficient. It is emitted as a COPY MachineInstr.
|
||||
COPY_TO_REGCLASS = 10,
|
||||
|
||||
/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
|
||||
DBG_VALUE = 11,
|
||||
|
||||
/// REG_SEQUENCE - This variadic instruction is used to form a register that
|
||||
/// represent a consecutive sequence of sub-registers. It's used as register
|
||||
/// coalescing / allocation aid and must be eliminated before code emission.
|
||||
// In SDNode form, the first operand encodes the register class created by
|
||||
// the REG_SEQUENCE, while each subsequent pair names a vreg + subreg index
|
||||
// pair. Once it has been lowered to a MachineInstr, the regclass operand
|
||||
// is no longer present.
|
||||
/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
|
||||
/// After register coalescing, references of v1024 should be replaced with
|
||||
/// v1027:3, v1025 with v1027:4, etc.
|
||||
REG_SEQUENCE = 12,
|
||||
|
||||
/// COPY - Target-independent register copy. This instruction can also be
|
||||
/// used to copy between subregisters of virtual registers.
|
||||
COPY = 13,
|
||||
|
||||
/// BUNDLE - This instruction represents an instruction bundle. Instructions
|
||||
/// that immediately follow a BUNDLE instruction and are marked with the
|
||||
/// 'InsideBundle' flag are inside the bundle.
|
||||
BUNDLE = 14,
|
||||
|
||||
/// Lifetime markers.
|
||||
LIFETIME_START = 15,
|
||||
LIFETIME_END = 16
|
||||
};
|
||||
} // end namespace TargetOpcode
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
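Illustrative only (helper name assumed): the enum above is typically consulted when walking machine instructions, for example to recognize the target-independent copy-like opcodes.

#include "llvm/Target/TargetOpcodes.h"

// True for the target-independent copy-like opcodes defined above.
static bool isCopyLikeOpcode(unsigned Opcode) {
  return Opcode == llvm::TargetOpcode::COPY ||
         Opcode == llvm::TargetOpcode::COPY_TO_REGCLASS;
}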
|
||||
215
thirdparty/clang/include/llvm/Target/TargetOptions.h
vendored
Normal file
@@ -0,0 +1,215 @@
|
||||
//===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines command line option flags that are shared across various
|
||||
// targets.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETOPTIONS_H
|
||||
#define LLVM_TARGET_TARGETOPTIONS_H
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
class MachineFunction;
|
||||
class StringRef;
|
||||
|
||||
// Possible float ABI settings. Used with FloatABIType in TargetOptions.h.
|
||||
namespace FloatABI {
|
||||
enum ABIType {
|
||||
Default, // Target-specific (either soft or hard, depending on triple, etc.).
|
||||
Soft, // Soft float.
|
||||
Hard // Hard float.
|
||||
};
|
||||
}
|
||||
|
||||
namespace FPOpFusion {
|
||||
enum FPOpFusionMode {
|
||||
Fast, // Enable fusion of FP ops wherever it's profitable.
|
||||
Standard, // Only allow fusion of 'blessed' ops (currently just fmuladd).
|
||||
Strict // Never fuse FP-ops.
|
||||
};
|
||||
}
|
||||
|
||||
class TargetOptions {
|
||||
public:
|
||||
TargetOptions()
|
||||
: PrintMachineCode(false), NoFramePointerElim(false),
|
||||
NoFramePointerElimNonLeaf(false), LessPreciseFPMADOption(false),
|
||||
UnsafeFPMath(false), NoInfsFPMath(false),
|
||||
NoNaNsFPMath(false), HonorSignDependentRoundingFPMathOption(false),
|
||||
UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false),
|
||||
JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false),
|
||||
GuaranteedTailCallOpt(false), DisableTailCalls(false),
|
||||
StackAlignmentOverride(0), RealignStack(true), SSPBufferSize(0),
|
||||
EnableFastISel(false), PositionIndependentExecutable(false),
|
||||
EnableSegmentedStacks(false), UseInitArray(false), TrapFuncName(""),
|
||||
FloatABIType(FloatABI::Default), AllowFPOpFusion(FPOpFusion::Standard)
|
||||
{}
|
||||
|
||||
/// PrintMachineCode - This flag is enabled when the -print-machineinstrs
|
||||
/// option is specified on the command line, and should enable debugging
|
||||
/// output from the code generator.
|
||||
unsigned PrintMachineCode : 1;
|
||||
|
||||
/// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is
|
||||
/// specified on the command line. If the target supports the frame pointer
|
||||
/// elimination optimization, this option should disable it.
|
||||
unsigned NoFramePointerElim : 1;
|
||||
|
||||
/// NoFramePointerElimNonLeaf - This flag is enabled when the
|
||||
/// -disable-non-leaf-fp-elim is specified on the command line. If the
|
||||
/// target supports the frame pointer elimination optimization, this option
|
||||
/// should disable it for non-leaf functions.
|
||||
unsigned NoFramePointerElimNonLeaf : 1;
|
||||
|
||||
/// DisableFramePointerElim - This returns true if frame pointer elimination
|
||||
/// optimization should be disabled for the given machine function.
|
||||
bool DisableFramePointerElim(const MachineFunction &MF) const;
|
||||
|
||||
/// LessPreciseFPMAD - This flag is enabled when the
|
||||
/// -enable-fp-mad is specified on the command line. When this flag is off
|
||||
/// (the default), the code generator is not allowed to generate mad
|
||||
/// (multiply add) if the result is "less precise" than doing those
|
||||
/// operations individually.
|
||||
unsigned LessPreciseFPMADOption : 1;
|
||||
bool LessPreciseFPMAD() const;
|
||||
|
||||
/// UnsafeFPMath - This flag is enabled when the
|
||||
/// -enable-unsafe-fp-math flag is specified on the command line. When
|
||||
/// this flag is off (the default), the code generator is not allowed to
|
||||
/// produce results that are "less precise" than IEEE allows. This includes
|
||||
/// use of X86 instructions like FSIN and FCOS instead of libcalls.
|
||||
/// UnsafeFPMath implies LessPreciseFPMAD.
|
||||
unsigned UnsafeFPMath : 1;
|
||||
|
||||
/// NoInfsFPMath - This flag is enabled when the
|
||||
/// -enable-no-infs-fp-math flag is specified on the command line. When
|
||||
/// this flag is off (the default), the code generator is not allowed to
|
||||
/// assume the FP arithmetic arguments and results are never +-Infs.
|
||||
unsigned NoInfsFPMath : 1;
|
||||
|
||||
/// NoNaNsFPMath - This flag is enabled when the
|
||||
/// -enable-no-nans-fp-math flag is specified on the command line. When
|
||||
/// this flag is off (the default), the code generator is not allowed to
|
||||
/// assume the FP arithmetic arguments and results are never NaNs.
|
||||
unsigned NoNaNsFPMath : 1;
|
||||
|
||||
/// HonorSignDependentRoundingFPMath - This returns true when the
|
||||
/// -enable-sign-dependent-rounding-fp-math is specified. If this returns
|
||||
/// false (the default), the code generator is allowed to assume that the
|
||||
/// rounding behavior is the default (round-to-zero for all floating point
|
||||
/// to integer conversions, and round-to-nearest for all other arithmetic
|
||||
/// truncations). If this is enabled (set to true), the code generator must
|
||||
/// assume that the rounding mode may dynamically change.
|
||||
unsigned HonorSignDependentRoundingFPMathOption : 1;
|
||||
bool HonorSignDependentRoundingFPMath() const;
|
||||
|
||||
/// UseSoftFloat - This flag is enabled when the -soft-float flag is
|
||||
/// specified on the command line. When this flag is on, the code generator
|
||||
/// will generate libcalls to the software floating point library instead of
|
||||
/// target FP instructions.
|
||||
unsigned UseSoftFloat : 1;
|
||||
|
||||
/// NoZerosInBSS - By default some codegens place zero-initialized data to
|
||||
/// .bss section. This flag disables such behaviour (necessary, e.g. for
|
||||
/// crt*.o compiling).
|
||||
unsigned NoZerosInBSS : 1;
|
||||
|
||||
/// JITExceptionHandling - This flag indicates that the JIT should emit
|
||||
/// exception handling information.
|
||||
unsigned JITExceptionHandling : 1;
|
||||
|
||||
/// JITEmitDebugInfo - This flag indicates that the JIT should try to emit
|
||||
/// debug information and notify a debugger about it.
|
||||
unsigned JITEmitDebugInfo : 1;
|
||||
|
||||
/// JITEmitDebugInfoToDisk - This flag indicates that the JIT should write
|
||||
/// the object files generated by the JITEmitDebugInfo flag to disk. This
|
||||
/// flag is hidden and is only for debugging the debug info.
|
||||
unsigned JITEmitDebugInfoToDisk : 1;
|
||||
|
||||
/// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
|
||||
/// specified on the commandline. When the flag is on, participating targets
|
||||
/// will perform tail call optimization on all calls which use the fastcc
|
||||
/// calling convention and which satisfy certain target-independent
|
||||
/// criteria (being at the end of a function, having the same return type
|
||||
/// as their parent function, etc.), using an alternate ABI if necessary.
|
||||
unsigned GuaranteedTailCallOpt : 1;
|
||||
|
||||
/// DisableTailCalls - This flag controls whether we will use tail calls.
|
||||
/// Disabling them may be useful to maintain a correct call stack.
|
||||
unsigned DisableTailCalls : 1;
|
||||
|
||||
/// StackAlignmentOverride - Override default stack alignment for target.
|
||||
unsigned StackAlignmentOverride;
|
||||
|
||||
/// RealignStack - This flag indicates whether the stack should be
|
||||
/// automatically realigned, if needed.
|
||||
unsigned RealignStack : 1;
|
||||
|
||||
/// SSPBufferSize - The minimum size of buffers that will receive stack
|
||||
/// smashing protection when -fstack-protection is used.
|
||||
unsigned SSPBufferSize;
|
||||
|
||||
/// EnableFastISel - This flag enables fast-path instruction selection
|
||||
/// which trades away generated code quality in favor of reducing
|
||||
/// compile time.
|
||||
unsigned EnableFastISel : 1;
|
||||
|
||||
/// PositionIndependentExecutable - This flag indicates whether the code
|
||||
/// will eventually be linked into a single executable, despite the PIC
|
||||
/// relocation model being in use. Its value is undefined (and irrelevant)
|
||||
/// if the relocation model is anything other than PIC.
|
||||
unsigned PositionIndependentExecutable : 1;
|
||||
|
||||
unsigned EnableSegmentedStacks : 1;
|
||||
|
||||
/// UseInitArray - Use .init_array instead of .ctors for static
|
||||
/// constructors.
|
||||
unsigned UseInitArray : 1;
|
||||
|
||||
/// getTrapFunctionName - If this returns a non-empty string, this means
|
||||
/// isel should lower Intrinsic::trap to a call to the specified function
|
||||
/// name instead of an ISD::TRAP node.
|
||||
std::string TrapFuncName;
|
||||
StringRef getTrapFunctionName() const;
|
||||
|
||||
/// FloatABIType - This setting is set when the -float-abi=xxx option is specified
|
||||
/// on the command line. This setting may either be Default, Soft, or Hard.
|
||||
/// Default selects the target's default behavior. Soft selects the ABI for
|
||||
/// UseSoftFloat, but does not indicate that FP hardware may not be used.
|
||||
/// Such a combination is unfortunately popular (e.g. arm-apple-darwin).
|
||||
/// Hard presumes that the normal FP ABI is used.
|
||||
FloatABI::ABIType FloatABIType;
|
||||
|
||||
/// AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
|
||||
/// This controls the creation of fused FP ops that store intermediate
|
||||
/// results in higher precision than IEEE allows (E.g. FMAs).
|
||||
///
|
||||
/// Fast mode - allows formation of fused FP ops whenever they're
|
||||
/// profitable.
|
||||
/// Standard mode - allow fusion only for 'blessed' FP ops. At present the
|
||||
/// only blessed op is the fmuladd intrinsic. In the future more blessed ops
|
||||
/// may be added.
|
||||
/// Strict mode - allow fusion only if/when it can be proven that the excess
|
||||
/// precision won't affect the result.
|
||||
///
|
||||
/// Note: This option only controls formation of fused ops by the
|
||||
/// optimizers. Fused operations that are explicitly specified (e.g. FMA
|
||||
/// via the llvm.fma.* intrinsic) will always be honored, regardless of
|
||||
/// the value of this option.
|
||||
FPOpFusion::FPOpFusionMode AllowFPOpFusion;
|
||||
|
||||
bool operator==(const TargetOptions &);
|
||||
};
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
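A small sketch (helper name assumed, not part of this commit) of constructing a TargetOptions value and flipping a few of the flags documented above before handing it to a TargetMachine constructor:

#include "llvm/Target/TargetOptions.h"

llvm::TargetOptions makeSoftFloatOptions() {
  llvm::TargetOptions Opts;                        // defaults from the constructor above
  Opts.UseSoftFloat = 1;                           // lower FP ops to library calls
  Opts.FloatABIType = llvm::FloatABI::Soft;        // soft-float calling convention
  Opts.AllowFPOpFusion = llvm::FPOpFusion::Strict; // never form fused FP ops
  return Opts;
}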
|
||||
892
thirdparty/clang/include/llvm/Target/TargetRegisterInfo.h
vendored
Normal file
@@ -0,0 +1,892 @@
|
||||
//=== Target/TargetRegisterInfo.h - Target Register Information -*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file describes an abstract interface used to get information about a
|
||||
// target machine's register file. This information is used for a variety of
|
||||
// purposes, especially register allocation.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_TARGET_TARGETREGISTERINFO_H
|
||||
#define LLVM_TARGET_TARGETREGISTERINFO_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/ValueTypes.h"
|
||||
#include "llvm/IR/CallingConv.h"
|
||||
#include "llvm/MC/MCRegisterInfo.h"
|
||||
#include <cassert>
|
||||
#include <functional>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class BitVector;
|
||||
class MachineFunction;
|
||||
class RegScavenger;
|
||||
template<class T> class SmallVectorImpl;
|
||||
class VirtRegMap;
|
||||
class raw_ostream;
|
||||
|
||||
class TargetRegisterClass {
|
||||
public:
|
||||
typedef const MCPhysReg* iterator;
|
||||
typedef const MCPhysReg* const_iterator;
|
||||
typedef const MVT::SimpleValueType* vt_iterator;
|
||||
typedef const TargetRegisterClass* const * sc_iterator;
|
||||
|
||||
// Instance variables filled by tablegen, do not use!
|
||||
const MCRegisterClass *MC;
|
||||
const vt_iterator VTs;
|
||||
const uint32_t *SubClassMask;
|
||||
const uint16_t *SuperRegIndices;
|
||||
const sc_iterator SuperClasses;
|
||||
ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
|
||||
|
||||
/// getID() - Return the register class ID number.
|
||||
///
|
||||
unsigned getID() const { return MC->getID(); }
|
||||
|
||||
/// getName() - Return the register class name for debugging.
|
||||
///
|
||||
const char *getName() const { return MC->getName(); }
|
||||
|
||||
/// begin/end - Return all of the registers in this class.
|
||||
///
|
||||
iterator begin() const { return MC->begin(); }
|
||||
iterator end() const { return MC->end(); }
|
||||
|
||||
/// getNumRegs - Return the number of registers in this class.
|
||||
///
|
||||
unsigned getNumRegs() const { return MC->getNumRegs(); }
|
||||
|
||||
/// getRegister - Return the specified register in the class.
|
||||
///
|
||||
unsigned getRegister(unsigned i) const {
|
||||
return MC->getRegister(i);
|
||||
}
|
||||
|
||||
/// contains - Return true if the specified register is included in this
|
||||
/// register class. This does not include virtual registers.
|
||||
bool contains(unsigned Reg) const {
|
||||
return MC->contains(Reg);
|
||||
}
|
||||
|
||||
/// contains - Return true if both registers are in this class.
|
||||
bool contains(unsigned Reg1, unsigned Reg2) const {
|
||||
return MC->contains(Reg1, Reg2);
|
||||
}
|
||||
|
||||
/// getSize - Return the size of the register in bytes, which is also the size
|
||||
/// of a stack slot allocated to hold a spilled copy of this register.
|
||||
unsigned getSize() const { return MC->getSize(); }
|
||||
|
||||
/// getAlignment - Return the minimum required alignment for a register of
|
||||
/// this class.
|
||||
unsigned getAlignment() const { return MC->getAlignment(); }
|
||||
|
||||
/// getCopyCost - Return the cost of copying a value between two registers in
|
||||
/// this class. A negative number means the register class is very expensive
|
||||
/// to copy e.g. status flag register classes.
|
||||
int getCopyCost() const { return MC->getCopyCost(); }
|
||||
|
||||
/// isAllocatable - Return true if this register class may be used to create
|
||||
/// virtual registers.
|
||||
bool isAllocatable() const { return MC->isAllocatable(); }
|
||||
|
||||
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
|
||||
///
|
||||
bool hasType(EVT vt) const {
|
||||
for(int i = 0; VTs[i] != MVT::Other; ++i)
|
||||
if (EVT(VTs[i]) == vt)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/// vt_begin / vt_end - Loop over all of the value types that can be
|
||||
/// represented by values in this register class.
|
||||
vt_iterator vt_begin() const {
|
||||
return VTs;
|
||||
}
|
||||
|
||||
vt_iterator vt_end() const {
|
||||
vt_iterator I = VTs;
|
||||
while (*I != MVT::Other) ++I;
|
||||
return I;
|
||||
}
|
||||
|
||||
/// hasSubClass - return true if the specified TargetRegisterClass
|
||||
/// is a proper sub-class of this TargetRegisterClass.
|
||||
bool hasSubClass(const TargetRegisterClass *RC) const {
|
||||
return RC != this && hasSubClassEq(RC);
|
||||
}
|
||||
|
||||
/// hasSubClassEq - Returns true if RC is a sub-class of or equal to this
|
||||
/// class.
|
||||
bool hasSubClassEq(const TargetRegisterClass *RC) const {
|
||||
unsigned ID = RC->getID();
|
||||
return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
|
||||
}
|
||||
|
||||
/// hasSuperClass - return true if the specified TargetRegisterClass is a
|
||||
/// proper super-class of this TargetRegisterClass.
|
||||
bool hasSuperClass(const TargetRegisterClass *RC) const {
|
||||
return RC->hasSubClass(this);
|
||||
}
|
||||
|
||||
/// hasSuperClassEq - Returns true if RC is a super-class of or equal to this
|
||||
/// class.
|
||||
bool hasSuperClassEq(const TargetRegisterClass *RC) const {
|
||||
return RC->hasSubClassEq(this);
|
||||
}
|
||||
|
||||
/// getSubClassMask - Returns a bit vector of subclasses, including this one.
|
||||
/// The vector is indexed by class IDs, see hasSubClassEq() above for how to
|
||||
/// use it.
|
||||
const uint32_t *getSubClassMask() const {
|
||||
return SubClassMask;
|
||||
}
|
||||
|
||||
/// getSuperRegIndices - Returns a 0-terminated list of sub-register indices
|
||||
/// that project some super-register class into this register class. The list
|
||||
/// has an entry for each Idx such that:
|
||||
///
|
||||
/// There exists SuperRC where:
|
||||
/// For all Reg in SuperRC:
|
||||
/// this->contains(Reg:Idx)
|
||||
///
|
||||
const uint16_t *getSuperRegIndices() const {
|
||||
return SuperRegIndices;
|
||||
}
|
||||
|
||||
/// getSuperClasses - Returns a NULL terminated list of super-classes. The
|
||||
/// classes are ordered by ID which is also a topological ordering from large
|
||||
/// to small classes. The list does NOT include the current class.
|
||||
sc_iterator getSuperClasses() const {
|
||||
return SuperClasses;
|
||||
}
|
||||
|
||||
/// isASubClass - return true if this TargetRegisterClass is a subset
|
||||
/// class of at least one other TargetRegisterClass.
|
||||
bool isASubClass() const {
|
||||
return SuperClasses[0] != 0;
|
||||
}
|
||||
|
||||
/// getRawAllocationOrder - Returns the preferred order for allocating
|
||||
/// registers from this register class in MF. The raw order comes directly
|
||||
/// from the .td file and may include reserved registers that are not
|
||||
/// allocatable. Register allocators should also make sure to allocate
|
||||
/// callee-saved registers only after all the volatiles are used. The
|
||||
/// RegisterClassInfo class provides filtered allocation orders with
|
||||
/// callee-saved registers moved to the end.
|
||||
///
|
||||
/// The MachineFunction argument can be used to tune the allocatable
|
||||
/// registers based on the characteristics of the function, subtarget, or
|
||||
/// other criteria.
|
||||
///
|
||||
/// By default, this method returns all registers in the class.
|
||||
///
|
||||
ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
|
||||
return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
|
||||
}
|
||||
};
|
||||
|
||||
/// TargetRegisterInfoDesc - Extra information, not in MCRegisterDesc, about
|
||||
/// registers. These are used by codegen, not by MC.
|
||||
struct TargetRegisterInfoDesc {
|
||||
unsigned CostPerUse; // Extra cost of instructions using register.
|
||||
bool inAllocatableClass; // Register belongs to an allocatable regclass.
|
||||
};
|
||||
|
||||
/// Each TargetRegisterClass has a per register weight, and weight
|
||||
/// limit which must be less than the limits of its pressure sets.
|
||||
struct RegClassWeight {
|
||||
unsigned RegWeight;
|
||||
unsigned WeightLimit;
|
||||
};
|
||||
|
||||
/// TargetRegisterInfo base class - We assume that the target defines a static
|
||||
/// array of TargetRegisterDesc objects that represent all of the machine
|
||||
/// registers that the target has. As such, we simply have to track a pointer
|
||||
/// to this array so that we can turn register number into a register
|
||||
/// descriptor.
|
||||
///
|
||||
class TargetRegisterInfo : public MCRegisterInfo {
|
||||
public:
|
||||
typedef const TargetRegisterClass * const * regclass_iterator;
|
||||
private:
|
||||
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
|
||||
const char *const *SubRegIndexNames; // Names of subreg indexes.
|
||||
// Pointer to array of lane masks, one per sub-reg index.
|
||||
const unsigned *SubRegIndexLaneMasks;
|
||||
|
||||
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
|
||||
|
||||
protected:
|
||||
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
|
||||
regclass_iterator RegClassBegin,
|
||||
regclass_iterator RegClassEnd,
|
||||
const char *const *SRINames,
|
||||
const unsigned *SRILaneMasks);
|
||||
virtual ~TargetRegisterInfo();
|
||||
public:
|
||||
|
||||
// Register numbers can represent physical registers, virtual registers, and
|
||||
// sometimes stack slots. The unsigned values are divided into these ranges:
|
||||
//
|
||||
// 0 Not a register, can be used as a sentinel.
|
||||
// [1;2^30) Physical registers assigned by TableGen.
|
||||
// [2^30;2^31) Stack slots. (Rarely used.)
|
||||
// [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
|
||||
//
|
||||
// Further sentinels can be allocated from the small negative integers.
|
||||
// DenseMapInfo<unsigned> uses -1u and -2u.
|
||||
|
||||
/// isStackSlot - Sometimes it is useful the be able to store a non-negative
|
||||
/// frame index in a variable that normally holds a register. isStackSlot()
|
||||
/// returns true if Reg is in the range used for stack slots.
|
||||
///
|
||||
/// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
|
||||
/// slots, so if a variable may contain a stack slot, always check
|
||||
/// isStackSlot() first.
|
||||
///
|
||||
static bool isStackSlot(unsigned Reg) {
|
||||
return int(Reg) >= (1 << 30);
|
||||
}
|
||||
|
||||
/// stackSlot2Index - Compute the frame index from a register value
|
||||
/// representing a stack slot.
|
||||
static int stackSlot2Index(unsigned Reg) {
|
||||
assert(isStackSlot(Reg) && "Not a stack slot");
|
||||
return int(Reg - (1u << 30));
|
||||
}
|
||||
|
||||
/// index2StackSlot - Convert a non-negative frame index to a stack slot
|
||||
/// register value.
|
||||
static unsigned index2StackSlot(int FI) {
|
||||
assert(FI >= 0 && "Cannot hold a negative frame index.");
|
||||
return FI + (1u << 30);
|
||||
}
|
||||
|
||||
/// isPhysicalRegister - Return true if the specified register number is in
|
||||
/// the physical register namespace.
|
||||
static bool isPhysicalRegister(unsigned Reg) {
|
||||
assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
|
||||
return int(Reg) > 0;
|
||||
}
|
||||
|
||||
/// isVirtualRegister - Return true if the specified register number is in
|
||||
/// the virtual register namespace.
|
||||
static bool isVirtualRegister(unsigned Reg) {
|
||||
assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
|
||||
return int(Reg) < 0;
|
||||
}
|
||||
|
||||
/// virtReg2Index - Convert a virtual register number to a 0-based index.
|
||||
/// The first virtual register in a function will get the index 0.
|
||||
static unsigned virtReg2Index(unsigned Reg) {
|
||||
assert(isVirtualRegister(Reg) && "Not a virtual register");
|
||||
return Reg & ~(1u << 31);
|
||||
}
|
||||
|
||||
/// index2VirtReg - Convert a 0-based index to a virtual register number.
|
||||
/// This is the inverse operation of VirtReg2IndexFunctor below.
|
||||
static unsigned index2VirtReg(unsigned Index) {
|
||||
return Index | (1u << 31);
|
||||
}
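// Illustrative values for the encoding above (not part of the interface):
//   index2VirtReg(0)   == 0x80000000u, and virtReg2Index(0x80000000u) == 0;
//   index2StackSlot(7) == (1u << 30) + 7, which isStackSlot() accepts.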
|
||||
|
||||
/// getMinimalPhysRegClass - Returns the Register Class of a physical
|
||||
/// register of the given type, picking the most specific sub-register class of
|
||||
/// the right type that contains this physreg.
|
||||
const TargetRegisterClass *
|
||||
getMinimalPhysRegClass(unsigned Reg, EVT VT = MVT::Other) const;
|
||||
|
||||
/// getAllocatableClass - Return the maximal subclass of the given register
|
||||
/// class that is allocatable, or NULL.
|
||||
const TargetRegisterClass *
|
||||
getAllocatableClass(const TargetRegisterClass *RC) const;
|
||||
|
||||
/// getAllocatableSet - Returns a bitset indexed by register number
|
||||
/// indicating if a register is allocatable or not. If a register class is
|
||||
/// specified, returns the subset for the class.
|
||||
BitVector getAllocatableSet(const MachineFunction &MF,
|
||||
const TargetRegisterClass *RC = NULL) const;
|
||||
|
||||
/// getCostPerUse - Return the additional cost of using this register instead
|
||||
/// of other registers in its class.
|
||||
unsigned getCostPerUse(unsigned RegNo) const {
|
||||
return InfoDesc[RegNo].CostPerUse;
|
||||
}
|
||||
|
||||
/// isInAllocatableClass - Return true if the register is in the allocation
|
||||
/// of any register class.
|
||||
bool isInAllocatableClass(unsigned RegNo) const {
|
||||
return InfoDesc[RegNo].inAllocatableClass;
|
||||
}
|
||||
|
||||
/// getSubRegIndexName - Return the human-readable symbolic target-specific
|
||||
/// name for the specified SubRegIndex.
|
||||
const char *getSubRegIndexName(unsigned SubIdx) const {
|
||||
assert(SubIdx && SubIdx < getNumSubRegIndices() &&
|
||||
"This is not a subregister index");
|
||||
return SubRegIndexNames[SubIdx-1];
|
||||
}
|
||||
|
||||
/// getSubRegIndexLaneMask - Return a bitmask representing the parts of a
|
||||
/// register that are covered by SubIdx.
|
||||
///
|
||||
/// Lane masks for sub-register indices are similar to register units for
|
||||
/// physical registers. The individual bits in a lane mask can't be assigned
|
||||
/// any specific meaning. They can be used to check if two sub-register
|
||||
/// indices overlap.
|
||||
///
|
||||
/// If the target has a register such that:
|
||||
///
|
||||
/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
|
||||
///
|
||||
/// then:
|
||||
///
|
||||
/// getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B) != 0
|
||||
///
|
||||
/// The converse is not necessarily true. If two lane masks have a common
|
||||
/// bit, the corresponding sub-registers may not overlap, but it can be
|
||||
/// assumed that they usually will.
|
||||
unsigned getSubRegIndexLaneMask(unsigned SubIdx) const {
|
||||
// SubIdx == 0 is allowed, it has the lane mask ~0u.
|
||||
assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
|
||||
return SubRegIndexLaneMasks[SubIdx];
|
||||
}
|
||||
|
||||
/// regsOverlap - Returns true if the two registers are equal or alias each
|
||||
/// other. The registers may be virtual registers.
|
||||
bool regsOverlap(unsigned regA, unsigned regB) const {
|
||||
if (regA == regB) return true;
|
||||
if (isVirtualRegister(regA) || isVirtualRegister(regB))
|
||||
return false;
|
||||
|
||||
// Regunits are numerically ordered. Find a common unit.
|
||||
MCRegUnitIterator RUA(regA, this);
|
||||
MCRegUnitIterator RUB(regB, this);
|
||||
do {
|
||||
if (*RUA == *RUB) return true;
|
||||
if (*RUA < *RUB) ++RUA;
|
||||
else ++RUB;
|
||||
} while (RUA.isValid() && RUB.isValid());
|
||||
return false;
|
||||
}
|
||||
|
||||
/// hasRegUnit - Returns true if Reg contains RegUnit.
|
||||
bool hasRegUnit(unsigned Reg, unsigned RegUnit) const {
|
||||
for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
|
||||
if (*Units == RegUnit)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/// getCalleeSavedRegs - Return a null-terminated list of all of the
|
||||
/// callee saved registers on this target. The register should be in the
|
||||
/// order of desired callee-save stack frame offset. The first register is
|
||||
/// closest to the incoming stack pointer if stack grows down, and vice versa.
|
||||
///
|
||||
virtual const MCPhysReg* getCalleeSavedRegs(const MachineFunction *MF = 0)
|
||||
const = 0;
|
||||
|
||||
/// getCallPreservedMask - Return a mask of call-preserved registers for the
|
||||
/// given calling convention on the current sub-target. The mask should
|
||||
/// include all call-preserved aliases. This is used by the register
|
||||
/// allocator to determine which registers can be live across a call.
|
||||
///
|
||||
/// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
|
||||
/// A set bit indicates that all bits of the corresponding register are
|
||||
/// preserved across the function call. The bit mask is expected to be
|
||||
/// sub-register complete, i.e. if A is preserved, so are all its
|
||||
/// sub-registers.
|
||||
///
|
||||
/// Bits are numbered from the LSB, so the bit for physical register Reg can
|
||||
/// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
|
||||
///
|
||||
/// A NULL pointer means that no register mask will be used, and call
|
||||
/// instructions should use implicit-def operands to indicate call clobbered
|
||||
/// registers.
|
||||
///
|
||||
virtual const uint32_t *getCallPreservedMask(CallingConv::ID) const {
|
||||
// The default mask clobbers everything. All targets should override.
|
||||
return 0;
|
||||
}
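// Illustrative check against a returned mask, following the bit layout
// documented above (Mask may be null for the default implementation):
//   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);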
|
||||
|
||||
/// getReservedRegs - Returns a bitset indexed by physical register number
|
||||
/// indicating if a register is a special register that has particular uses
|
||||
/// and should be considered unavailable at all times, e.g. SP, RA. This is
|
||||
/// used by register scavenger to determine what registers are free.
|
||||
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
|
||||
|
||||
/// getMatchingSuperReg - Return a super-register of the specified register
|
||||
/// Reg so its sub-register of index SubIdx is Reg.
|
||||
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
|
||||
const TargetRegisterClass *RC) const {
|
||||
return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
|
||||
}
|
||||
|
||||
/// getMatchingSuperRegClass - Return a subclass of the specified register
|
||||
/// class A so that each register in it has a sub-register of the
|
||||
/// specified sub-register index which is in the specified register class B.
|
||||
///
|
||||
/// TableGen will synthesize missing A sub-classes.
|
||||
virtual const TargetRegisterClass *
|
||||
getMatchingSuperRegClass(const TargetRegisterClass *A,
|
||||
const TargetRegisterClass *B, unsigned Idx) const;
|
||||
|
||||
/// getSubClassWithSubReg - Returns the largest legal sub-class of RC that
|
||||
/// supports the sub-register index Idx.
|
||||
/// If no such sub-class exists, return NULL.
|
||||
/// If all registers in RC already have an Idx sub-register, return RC.
|
||||
///
|
||||
/// TableGen generates a version of this function that is good enough in most
|
||||
/// cases. Targets can override if they have constraints that TableGen
|
||||
/// doesn't understand. For example, the x86 sub_8bit sub-register index is
|
||||
/// supported by the full GR32 register class in 64-bit mode, but only by the
|
||||
/// GR32_ABCD register class in 32-bit mode.
|
||||
///
|
||||
/// TableGen will synthesize missing RC sub-classes.
|
||||
virtual const TargetRegisterClass *
|
||||
getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
|
||||
assert(Idx == 0 && "Target has no sub-registers");
|
||||
return RC;
|
||||
}
|
||||
|
||||
/// composeSubRegIndices - Return the subregister index you get from composing
|
||||
/// two subregister indices.
|
||||
///
|
||||
/// The special null sub-register index composes as the identity.
|
||||
///
|
||||
/// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
|
||||
/// returns c. Note that composeSubRegIndices does not tell you about illegal
|
||||
/// compositions. If R does not have a subreg a, or R:a does not have a subreg
|
||||
/// b, composeSubRegIndices doesn't tell you.
|
||||
///
|
||||
/// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
|
||||
/// ssub_0:S0 - ssub_3:S3 subregs.
|
||||
/// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
|
||||
///
|
||||
unsigned composeSubRegIndices(unsigned a, unsigned b) const {
|
||||
if (!a) return b;
|
||||
if (!b) return a;
|
||||
return composeSubRegIndicesImpl(a, b);
|
||||
}
|
||||
|
||||
protected:
|
||||
/// Overridden by TableGen in targets that have sub-registers.
|
||||
virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
|
||||
llvm_unreachable("Target has no sub-registers");
|
||||
}
|
||||
|
||||
public:
|
||||
/// getCommonSuperRegClass - Find a common super-register class if it exists.
|
||||
///
|
||||
/// Find a register class, SuperRC and two sub-register indices, PreA and
|
||||
/// PreB, such that:
|
||||
///
|
||||
/// 1. PreA + SubA == PreB + SubB (using composeSubRegIndices()), and
|
||||
///
|
||||
/// 2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
|
||||
///
|
||||
/// 3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
|
||||
///
|
||||
/// SuperRC will be chosen such that no super-class of SuperRC satisfies the
|
||||
/// requirements, and there is no register class with a smaller spill size
|
||||
/// that satisfies the requirements.
|
||||
///
|
||||
/// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
|
||||
///
|
||||
/// Either of the PreA and PreB sub-register indices may be returned as 0. In
|
||||
/// that case, the returned register class will be a sub-class of the
|
||||
/// corresponding argument register class.
|
||||
///
|
||||
/// The function returns NULL if no register class can be found.
|
||||
///
|
||||
const TargetRegisterClass*
|
||||
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
|
||||
const TargetRegisterClass *RCB, unsigned SubB,
|
||||
unsigned &PreA, unsigned &PreB) const;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Register Class Information
|
||||
//
|
||||
|
||||
/// Register class iterators
|
||||
///
|
||||
regclass_iterator regclass_begin() const { return RegClassBegin; }
|
||||
regclass_iterator regclass_end() const { return RegClassEnd; }
|
||||
|
||||
unsigned getNumRegClasses() const {
|
||||
return (unsigned)(regclass_end()-regclass_begin());
|
||||
}
|
||||
|
||||
/// getRegClass - Returns the register class associated with the enumeration
|
||||
/// value. See class MCOperandInfo.
|
||||
const TargetRegisterClass *getRegClass(unsigned i) const {
|
||||
assert(i < getNumRegClasses() && "Register Class ID out of range");
|
||||
return RegClassBegin[i];
|
||||
}
|
||||
|
||||
/// getCommonSubClass - find the largest common subclass of A and B. Return
|
||||
/// NULL if there is no common subclass.
|
||||
const TargetRegisterClass *
|
||||
getCommonSubClass(const TargetRegisterClass *A,
|
||||
const TargetRegisterClass *B) const;
|
||||
|
||||
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
|
||||
/// values. If a target supports multiple different pointer register classes,
|
||||
/// kind specifies which one is indicated.
|
||||
virtual const TargetRegisterClass *
|
||||
getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
|
||||
llvm_unreachable("Target didn't implement getPointerRegClass!");
|
||||
}
|
||||
|
||||
/// getCrossCopyRegClass - Returns a legal register class to copy a register
|
||||
/// in the specified class to or from. If it is possible to copy the register
|
||||
/// directly without using a cross register class copy, return the specified
|
||||
/// RC. Returns NULL if it is not possible to copy between two registers of
|
||||
/// the specified class.
|
||||
virtual const TargetRegisterClass *
|
||||
getCrossCopyRegClass(const TargetRegisterClass *RC) const {
|
||||
return RC;
|
||||
}
|
||||
|
||||
/// getLargestLegalSuperClass - Returns the largest super class of RC that is
|
||||
/// legal to use in the current sub-target and has the same spill size.
|
||||
/// The returned register class can be used to create virtual registers which
|
||||
/// means that all its registers can be copied and spilled.
|
||||
virtual const TargetRegisterClass*
|
||||
getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
|
||||
/// The default implementation is very conservative and doesn't allow the
|
||||
/// register allocator to inflate register classes.
|
||||
return RC;
|
||||
}
|
||||
|
||||
/// getRegPressureLimit - Return the register pressure "high water mark" for
|
||||
/// the specific register class. The scheduler is in high register pressure
|
||||
/// mode (for the specific register class) if it goes over the limit.
|
||||
///
|
||||
/// Note: this is the old register pressure model that relies on a manually
|
||||
/// specified representative register class per value type.
|
||||
virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
|
||||
MachineFunction &MF) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
  /// Get the weight in units of pressure for this register class.
  virtual const RegClassWeight &getRegClassWeight(
    const TargetRegisterClass *RC) const = 0;

  /// Get the weight in units of pressure for this register unit.
  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;

  /// Get the number of dimensions of register pressure.
  virtual unsigned getNumRegPressureSets() const = 0;

  /// Get the name of this register unit pressure set.
  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;

  /// Get the register unit pressure limit for this dimension.
  /// This limit must be adjusted dynamically for reserved registers.
  virtual unsigned getRegPressureSetLimit(unsigned Idx) const = 0;

  /// Get the dimensions of register pressure impacted by this register class.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegClassPressureSets(
    const TargetRegisterClass *RC) const = 0;

  /// Get the dimensions of register pressure impacted by this register unit.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;

  /// Get a list of 'hint' registers that the register allocator should try
  /// first when allocating a physical register for the virtual register
  /// VirtReg. These registers are effectively moved to the front of the
  /// allocation order.
  ///
  /// The Order argument is the allocation order for VirtReg's register class
  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  /// come from Order, and they must not be reserved.
  ///
  /// The default implementation of this function can resolve
  /// target-independent hints provided to MRI::setRegAllocationHint with
  /// HintType == 0. Targets that override this function should defer to the
  /// default implementation if they have no reason to change the allocation
  /// order for VirtReg. There may be target-independent hints.
  virtual void getRegAllocationHints(unsigned VirtReg,
                                     ArrayRef<MCPhysReg> Order,
                                     SmallVectorImpl<MCPhysReg> &Hints,
                                     const MachineFunction &MF,
                                     const VirtRegMap *VRM = 0) const;

  /// avoidWriteAfterWrite - Return true if the register allocator should avoid
  /// writing a register from RC in two consecutive instructions.
  /// This can avoid pipeline stalls on certain architectures.
  /// It does cause increased register pressure, though.
  virtual bool avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
    return false;
  }

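  // The hints consumed above are recorded on MachineRegisterInfo; a minimal
  // sketch, assuming 'MRI', 'VirtReg' and 'PhysReg' are already in scope (the
  // names are assumptions, not part of this interface):
  //
  //   // HintType 0 is the target-independent "prefer this physreg" hint.
  //   MRI.setRegAllocationHint(VirtReg, /*Type=*/0, PhysReg);
  //   // The default getRegAllocationHints() will then move PhysReg to the
  //   // front of the allocation order, provided it appears in Order and is
  //   // not reserved.
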
  /// UpdateRegAllocHint - A callback to allow the target a chance to update
  /// register allocation hints when a register is "changed" (e.g. coalesced)
  /// to another register. e.g. On ARM, some virtual registers should target
  /// register pairs; if one of the pair is coalesced to another register, the
  /// allocation hint of the other half of the pair should be changed to point
  /// to the new register.
  virtual void UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                  MachineFunction &MF) const {
    // Do nothing.
  }

  /// requiresRegisterScavenging - returns true if the target requires (and can
  /// make use of) the register scavenger.
  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// useFPForScavengingIndex - returns true if the target wants to use
  /// frame pointer based accesses to spill to the scavenger emergency spill
  /// slot.
  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
    return true;
  }

  /// requiresFrameIndexScavenging - returns true if the target requires post
  /// PEI scavenging of registers for materializing frame index constants.
  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// requiresVirtualBaseRegisters - Returns true if the target wants the
  /// LocalStackAllocation pass to be run and virtual base registers
  /// used for more efficient stack access.
  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
    return false;
  }

  /// hasReservedSpillSlot - Return true if target has reserved a spill slot in
  /// the stack frame of the given function for the specified register. e.g. On
  /// x86, if the frame register is required, the first fixed stack object is
  /// reserved as its spill slot. This tells PEI not to create a new stack frame
  /// object for the given register. It should be called only after
  /// processFunctionBeforeCalleeSavedScan().
  virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
                                    int &FrameIdx) const {
    return false;
  }

  /// trackLivenessAfterRegAlloc - returns true if the live-ins should be
  /// tracked after register allocation.
  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
    return false;
  }

  /// needsStackRealignment - true if storage within the function requires the
  /// stack pointer to be aligned more than the normal calling convention calls
  /// for.
  virtual bool needsStackRealignment(const MachineFunction &MF) const {
    return false;
  }

  /// getFrameIndexInstrOffset - Get the offset from the referenced frame
  /// index in the instruction, if there is one.
  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                           int Idx) const {
    return 0;
  }

  /// needsFrameBaseReg - Returns true if the instruction's frame index
  /// reference would be better served by a base register other than FP
  /// or SP. Used by LocalStackFrameAllocation to determine which frame index
  /// references it should create new base registers for.
  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
    return false;
  }

  /// materializeFrameBaseRegister - Insert defining instruction(s) for
  /// BaseReg to be a pointer to FrameIdx before insertion point I.
  virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                            unsigned BaseReg, int FrameIdx,
                                            int64_t Offset) const {
    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
                     "target");
  }

  /// resolveFrameIndex - Resolve a frame index operand of an instruction
  /// to reference the indicated base register plus offset instead.
  virtual void resolveFrameIndex(MachineBasicBlock::iterator I,
                                 unsigned BaseReg, int64_t Offset) const {
    llvm_unreachable("resolveFrameIndex does not exist on this target");
  }

  /// isFrameOffsetLegal - Determine whether a given offset immediate is
  /// encodable to resolve a frame index.
  virtual bool isFrameOffsetLegal(const MachineInstr *MI,
                                  int64_t Offset) const {
    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  }


  /// saveScavengerRegister - Spill the register so it can be used by the
  /// register scavenger. Return true if the register was spilled, false
  /// otherwise. If this function does not spill the register, the scavenger
  /// will instead spill it to the emergency spill slot.
  ///
  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator &UseMI,
                                     const TargetRegisterClass *RC,
                                     unsigned Reg) const {
    return false;
  }

  /// eliminateFrameIndex - This method must be overridden to eliminate abstract
  /// frame indices from instructions which may use them. The instruction
  /// referenced by the iterator contains an MO_FrameIndex operand which must be
  /// eliminated by this method. This method may modify or replace the
  /// specified instruction, as long as it keeps the iterator pointing at the
  /// finished product. SPAdj is the SP adjustment due to the call frame setup
  /// instruction. FIOperandNum is the FI operand number.
  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                   int SPAdj, unsigned FIOperandNum,
                                   RegScavenger *RS = NULL) const = 0;

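  // A minimal sketch of what a target override of eliminateFrameIndex tends to
  // look like; 'MyTargetRegisterInfo' and the operand layout (offset kept in
  // the operand after the frame index) are assumptions, not requirements:
  //
  //   void MyTargetRegisterInfo::eliminateFrameIndex(
  //       MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum,
  //       RegScavenger *RS) const {
  //     MachineInstr &Inst = *MI;
  //     MachineFunction &MF = *Inst.getParent()->getParent();
  //     int FrameIndex = Inst.getOperand(FIOperandNum).getIndex();
  //     int64_t Offset =
  //         MF.getFrameInfo()->getObjectOffset(FrameIndex) + SPAdj;
  //     // Rewrite the MO_FrameIndex operand to "frame register + offset".
  //     Inst.getOperand(FIOperandNum).ChangeToRegister(getFrameRegister(MF),
  //                                                    /*isDef=*/false);
  //     Inst.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
  //   }
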
  //===--------------------------------------------------------------------===//
  /// Debug information queries.

  /// getFrameRegister - This method should return the register used as a base
  /// for values allocated in the current stack frame.
  virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;

  /// getCompactUnwindRegNum - This function maps the register to the number for
  /// compact unwind encoding. Return -1 if the register isn't valid.
  virtual int getCompactUnwindRegNum(unsigned, bool) const {
    return -1;
  }
};


//===----------------------------------------------------------------------===//
// SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry which
// also satisfies the above requirement, assuming Reg:0 == Reg.
//
class SuperRegClassIterator {
  const unsigned RCMaskWords;
  unsigned SubReg;
  const uint16_t *Idx;
  const uint32_t *Mask;

public:
  /// Create a SuperRegClassIterator that visits all the super-register classes
  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  SuperRegClassIterator(const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI,
                        bool IncludeSelf = false)
    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
      SubReg(0),
      Idx(RC->getSuperRegIndices()),
      Mask(RC->getSubClassMask()) {
    if (!IncludeSelf)
      ++*this;
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return Idx; }

  /// Returns the current sub-register index.
  unsigned getSubReg() const { return SubReg; }

  /// Returns the bit mask of register classes that getSubReg() projects into
  /// RC.
  const uint32_t *getMask() const { return Mask; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    Mask += RCMaskWords;
    SubReg = *Idx++;
    if (!SubReg)
      Idx = 0;
  }
};

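// A minimal usage sketch, assuming 'RC' and 'TRI' are valid pointers (the
// variable names are assumptions, not part of this interface):
//
//   for (SuperRegClassIterator SRI(RC, TRI, /*IncludeSelf=*/true);
//        SRI.isValid(); ++SRI) {
//     unsigned SubIdx = SRI.getSubReg();     // 0 for the IncludeSelf entry
//     const uint32_t *Mask = SRI.getMask();  // classes projecting into RC
//     (void)SubIdx; (void)Mask;
//   }
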
// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
  unsigned operator()(unsigned Reg) const {
    return TargetRegisterInfo::virtReg2Index(Reg);
  }
};

/// PrintReg - Helper class for printing registers on a raw_ostream.
/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %vreg5          - a virtual register.
///   %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
///   %EAX            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << PrintReg(Reg, TRI) << '\n';
///
class PrintReg {
  const TargetRegisterInfo *TRI;
  unsigned Reg;
  unsigned SubIdx;
public:
  explicit PrintReg(unsigned reg, const TargetRegisterInfo *tri = 0,
                    unsigned subidx = 0)
    : TRI(tri), Reg(reg), SubIdx(subidx) {}
  void print(raw_ostream&) const;
};

static inline raw_ostream &operator<<(raw_ostream &OS, const PrintReg &PR) {
  PR.print(OS);
  return OS;
}

/// PrintRegUnit - Helper class for printing register units on a raw_ostream.
///
/// Register units are named after their root registers:
///
///   AL      - Single root.
///   FP0~ST7 - Dual roots.
///
/// Usage: OS << PrintRegUnit(Unit, TRI) << '\n';
///
class PrintRegUnit {
  const TargetRegisterInfo *TRI;
  unsigned Unit;
public:
  PrintRegUnit(unsigned unit, const TargetRegisterInfo *tri)
    : TRI(tri), Unit(unit) {}
  void print(raw_ostream&) const;
};

static inline raw_ostream &operator<<(raw_ostream &OS, const PrintRegUnit &PR) {
  PR.print(OS);
  return OS;
}

} // End llvm namespace

#endif
101
thirdparty/clang/include/llvm/Target/TargetSelectionDAGInfo.h
vendored
Normal file
101
thirdparty/clang/include/llvm/Target/TargetSelectionDAGInfo.h
vendored
Normal file
@@ -0,0 +1,101 @@
//==-- llvm/Target/TargetSelectionDAGInfo.h - SelectionDAG Info --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the TargetSelectionDAGInfo class, which targets can
// subclass to parameterize the SelectionDAG lowering and instruction
// selection process.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETSELECTIONDAGINFO_H
#define LLVM_TARGET_TARGETSELECTIONDAGINFO_H

#include "llvm/CodeGen/SelectionDAGNodes.h"

namespace llvm {

class DataLayout;
class TargetMachine;

//===----------------------------------------------------------------------===//
/// TargetSelectionDAGInfo - Targets can subclass this to parameterize the
/// SelectionDAG lowering and instruction selection process.
///
class TargetSelectionDAGInfo {
  TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
  void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;

  const DataLayout *TD;

protected:
  const DataLayout *getDataLayout() const { return TD; }

public:
  explicit TargetSelectionDAGInfo(const TargetMachine &TM);
  virtual ~TargetSelectionDAGInfo();

  /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
  /// memcpy. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// If AlwaysInline is true, the size is constant and the target should not
  /// emit any calls and is strongly encouraged to attempt to emit inline code
  /// even if it is beyond the usual threshold because this intrinsic is being
  /// expanded in a place where calls are not feasible (e.g. within the prologue
  /// for another call). If the target chooses to decline an AlwaysInline
  /// request here, legalize will resort to using simple loads and stores.
  virtual SDValue
  EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                          SDValue Chain,
                          SDValue Op1, SDValue Op2,
                          SDValue Op3, unsigned Align, bool isVolatile,
                          bool AlwaysInline,
                          MachinePointerInfo DstPtrInfo,
                          MachinePointerInfo SrcPtrInfo) const {
    return SDValue();
  }

  /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
  /// memmove. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  virtual SDValue
  EmitTargetCodeForMemmove(SelectionDAG &DAG, DebugLoc dl,
                           SDValue Chain,
                           SDValue Op1, SDValue Op2,
                           SDValue Op3, unsigned Align, bool isVolatile,
                           MachinePointerInfo DstPtrInfo,
                           MachinePointerInfo SrcPtrInfo) const {
    return SDValue();
  }

  /// EmitTargetCodeForMemset - Emit target-specific code that performs a
  /// memset. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple stores and can be more
  /// efficient than using a library call. This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  virtual SDValue
  EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
                          SDValue Chain,
                          SDValue Op1, SDValue Op2,
                          SDValue Op3, unsigned Align, bool isVolatile,
                          MachinePointerInfo DstPtrInfo) const {
    return SDValue();
  }
};
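
// A hedged sketch of how a backend might opt in to custom memset lowering;
// 'MyTargetSelectionDAGInfo' is a hypothetical subclass, not part of this
// header, and the 16-byte threshold is an arbitrary example value:
//
//   SDValue MyTargetSelectionDAGInfo::EmitTargetCodeForMemset(
//       SelectionDAG &DAG, DebugLoc dl, SDValue Chain, SDValue Dst,
//       SDValue Val, SDValue Size, unsigned Align, bool isVolatile,
//       MachinePointerInfo DstPtrInfo) const {
//     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
//     if (!C || C->getZExtValue() > 16)
//       return SDValue();  // decline; generic lowering takes over
//     // ... emit a short sequence of stores here, chained together ...
//     return Chain;        // return the chain of the emitted sequence
//   }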

} // end llvm namespace

#endif
84
thirdparty/clang/include/llvm/Target/TargetSubtargetInfo.h
vendored
Normal file
84
thirdparty/clang/include/llvm/Target/TargetSubtargetInfo.h
vendored
Normal file
@@ -0,0 +1,84 @@
//==-- llvm/Target/TargetSubtargetInfo.h - Target Information ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the subtarget options of a Target machine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETSUBTARGETINFO_H
#define LLVM_TARGET_TARGETSUBTARGETINFO_H

#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {

class MachineFunction;
class MachineInstr;
class SDep;
class SUnit;
class TargetRegisterClass;
class TargetSchedModel;
template <typename T> class SmallVectorImpl;

//===----------------------------------------------------------------------===//
///
/// TargetSubtargetInfo - Generic base class for all target subtargets. All
/// Target-specific options that control code generation and printing should
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
  TargetSubtargetInfo(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses...
  TargetSubtargetInfo();
public:
  // AntiDepBreakMode - Type of anti-dependence breaking that should
  // be performed before post-RA scheduling.
  typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
  typedef SmallVectorImpl<const TargetRegisterClass*> RegClassVector;

  virtual ~TargetSubtargetInfo();

  /// Resolve a SchedClass at runtime, where SchedClass identifies an
  /// MCSchedClassDesc with the isVariant property. This may return the ID of
  /// another variant SchedClass, but repeated invocation must quickly terminate
  /// in a nonvariant SchedClass.
  virtual unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
                                     const TargetSchedModel* SchedModel) const {
    return 0;
  }

  /// \brief True if the subtarget should run MachineScheduler after aggressive
  /// coalescing.
  ///
  /// This currently replaces the SelectionDAG scheduler with the "source" order
  /// scheduler. It does not yet disable the postRA scheduler.
  virtual bool enableMachineScheduler() const;

  // enablePostRAScheduler - If the target can benefit from post-regalloc
  // scheduling and the specified optimization level meets the requirement,
  // return true to enable post-register-allocation scheduling. In
  // CriticalPathRCs return any register classes that should only be broken
  // if on the critical path.
  virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
                                     AntiDepBreakMode& Mode,
                                     RegClassVector& CriticalPathRCs) const;
  // adjustSchedDependency - Perform target specific adjustments to
  // the latency of a schedule dependency.
  virtual void adjustSchedDependency(SUnit *def, SUnit *use,
                                     SDep& dep) const { }

  /// \brief Reset the features for the subtarget.
  virtual void resetSubtargetFeatures(const MachineFunction *MF) { }
};
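
// A hedged sketch of a subtarget opting in to post-RA scheduling; the
// 'MyTargetSubtarget' class is a hypothetical example, not part of this header:
//
//   bool MyTargetSubtarget::enablePostRAScheduler(
//       CodeGenOpt::Level OptLevel, AntiDepBreakMode &Mode,
//       RegClassVector &CriticalPathRCs) const {
//     Mode = ANTIDEP_CRITICAL;  // only break anti-deps on the critical path
//     CriticalPathRCs.clear();  // no class restricted to the critical path
//     return OptLevel >= CodeGenOpt::Default;
//   }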

} // End llvm namespace

#endif