initial
93
thirdparty/clang/include/llvm/CodeGen/Analysis.h
vendored
Normal file
@@ -0,0 +1,93 @@
//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CallSite.h"

namespace llvm {

class GlobalVariable;
class TargetLowering;
class SDNode;
class SDValue;
class SelectionDAG;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned ComputeLinearIndex(Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

inline unsigned ComputeLinearIndex(Type *Ty,
                                   ArrayRef<unsigned> Indices,
                                   unsigned CurIndex = 0) {
  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *Offsets = 0,
                     uint64_t StartingOffset = 0);

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *ExtractTypeInfo(Value *V);

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                               const TargetLowering &TLI);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetLowering &TLI);

} // End llvm namespace

#endif
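The two entry points above are easiest to see on a concrete aggregate. Below is a minimal sketch, not part of the vendored header; it assumes a TargetLowering reference and an LLVMContext are available from the surrounding backend code, and the helper name flattenStruct is illustrative only.

// Illustrative sketch: flattening { i32, [2 x float] } with ComputeValueVTs.
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DerivedTypes.h"

static void flattenStruct(const llvm::TargetLowering &TLI, llvm::LLVMContext &Ctx) {
  using namespace llvm;
  // The aggregate decomposes into one i32 EVT and two f32 EVTs.
  Type *Elts[] = { Type::getInt32Ty(Ctx),
                   ArrayType::get(Type::getFloatTy(Ctx), 2) };
  StructType *STy = StructType::get(Ctx, Elts);

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, STy, ValueVTs, &Offsets); // parallel lists of types and byte offsets

  // Linearized index of member {1, 0}, i.e. the first float in the array.
  unsigned Idx[] = { 1, 0 };
  (void)ComputeLinearIndex(STy, Idx, Idx + 2);
}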
490
thirdparty/clang/include/llvm/CodeGen/AsmPrinter.h
vendored
Normal file
@@ -0,0 +1,490 @@
//===-- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a class to be used as the base class for target specific
// asm writers. This class primarily handles common functionality used by
// all asm writers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ASMPRINTER_H
#define LLVM_CODEGEN_ASMPRINTER_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"

namespace llvm {
  class BlockAddress;
  class GCStrategy;
  class Constant;
  class ConstantArray;
  class GCMetadataPrinter;
  class GlobalValue;
  class GlobalVariable;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class MachineLocation;
  class MachineLoopInfo;
  class MachineLoop;
  class MachineConstantPoolValue;
  class MachineJumpTableInfo;
  class MachineModuleInfo;
  class MachineMove;
  class MCAsmInfo;
  class MCContext;
  class MCSection;
  class MCStreamer;
  class MCSymbol;
  class MDNode;
  class DwarfDebug;
  class DwarfException;
  class Mangler;
  class TargetLoweringObjectFile;
  class DataLayout;
  class TargetMachine;

  /// AsmPrinter - This class is intended to be used as a driving class for all
  /// asm writers.
  class AsmPrinter : public MachineFunctionPass {
  public:
    /// Target machine description.
    ///
    TargetMachine &TM;

    /// Target Asm Printer information.
    ///
    const MCAsmInfo *MAI;

    /// OutContext - This is the context for the output file that we are
    /// streaming. This owns all of the global MC-related objects for the
    /// generated translation unit.
    MCContext &OutContext;

    /// OutStreamer - This is the MCStreamer object for the file we are
    /// generating. This contains the transient state for the current
    /// translation unit that we are generating (such as the current section
    /// etc).
    MCStreamer &OutStreamer;

    /// The current machine function.
    const MachineFunction *MF;

    /// MMI - This is a pointer to the current MachineModuleInfo.
    MachineModuleInfo *MMI;

    /// Name-mangler for global names.
    ///
    Mangler *Mang;

    /// The symbol for the current function. This is recalculated at the
    /// beginning of each call to runOnMachineFunction().
    ///
    MCSymbol *CurrentFnSym;

    /// The symbol used to represent the start of the current function for the
    /// purpose of calculating its size (e.g. using the .size directive). By
    /// default, this is equal to CurrentFnSym.
    MCSymbol *CurrentFnSymForSize;

  private:
    // GCMetadataPrinters - The garbage collection metadata printer table.
    void *GCMetadataPrinters;  // Really a DenseMap.

    /// VerboseAsm - Emit comments in assembly output if this is true.
    ///
    bool VerboseAsm;
    static char ID;

    /// If VerboseAsm is set, a pointer to the loop info for this
    /// function.
    MachineLoopInfo *LI;

    /// DD - If the target supports dwarf debug info, this pointer is non-null.
    DwarfDebug *DD;

    /// DE - If the target supports dwarf exception info, this pointer is
    /// non-null.
    DwarfException *DE;

  protected:
    explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);

  public:
    virtual ~AsmPrinter();

    /// isVerbose - Return true if assembly output should contain comments.
    ///
    bool isVerbose() const { return VerboseAsm; }

    /// getFunctionNumber - Return a unique ID for the current function.
    ///
    unsigned getFunctionNumber() const;

    /// getObjFileLowering - Return information about object file lowering.
    const TargetLoweringObjectFile &getObjFileLowering() const;

    /// getDataLayout - Return information about data layout.
    const DataLayout &getDataLayout() const;

    /// getCurrentSection() - Return the current section we are emitting to.
    const MCSection *getCurrentSection() const;


    //===------------------------------------------------------------------===//
    // MachineFunctionPass Implementation.
    //===------------------------------------------------------------------===//

    /// getAnalysisUsage - Record analysis usage.
    ///
    void getAnalysisUsage(AnalysisUsage &AU) const;

    /// doInitialization - Set up the AsmPrinter when we are working on a new
    /// module. If your pass overrides this, it must make sure to explicitly
    /// call this implementation.
    bool doInitialization(Module &M);

    /// doFinalization - Shut down the asmprinter. If you override this in your
    /// pass, you must make sure to call it explicitly.
    bool doFinalization(Module &M);

    /// runOnMachineFunction - Emit the specified function out to the
    /// OutStreamer.
    virtual bool runOnMachineFunction(MachineFunction &MF) {
      SetupMachineFunction(MF);
      EmitFunctionHeader();
      EmitFunctionBody();
      return false;
    }

    //===------------------------------------------------------------------===//
    // Coarse grained IR lowering routines.
    //===------------------------------------------------------------------===//

    /// SetupMachineFunction - This should be called when a new MachineFunction
    /// is being processed from runOnMachineFunction.
    void SetupMachineFunction(MachineFunction &MF);

    /// EmitFunctionHeader - This method emits the header for the current
    /// function.
    void EmitFunctionHeader();

    /// EmitFunctionBody - This method emits the body and trailer for a
    /// function.
    void EmitFunctionBody();

    void emitPrologLabel(const MachineInstr &MI);

    enum CFIMoveType {
      CFI_M_None,
      CFI_M_EH,
      CFI_M_Debug
    };
    CFIMoveType needsCFIMoves();

    bool needsSEHMoves();

    /// needsRelocationsForDwarfStringPool - Specifies whether the object format
    /// expects to use relocations to refer to debug entries. Alternatively we
    /// emit section offsets in bytes from the start of the string pool.
    bool needsRelocationsForDwarfStringPool() const;

    /// EmitConstantPool - Print to the current output stream assembly
    /// representations of the constants in the constant pool MCP. This is
    /// used to print out constants which have been "spilled to memory" by
    /// the code generator.
    ///
    virtual void EmitConstantPool();

    /// EmitJumpTableInfo - Print assembly representations of the jump tables
    /// used by the current function to the current output stream.
    ///
    void EmitJumpTableInfo();

    /// EmitGlobalVariable - Emit the specified global variable to the .s file.
    virtual void EmitGlobalVariable(const GlobalVariable *GV);

    /// EmitSpecialLLVMGlobal - Check to see if the specified global is a
    /// special global used by LLVM. If so, emit it and return true, otherwise
    /// do nothing and return false.
    bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);

    /// EmitAlignment - Emit an alignment directive to the specified power of
    /// two boundary. For example, if you pass in 3 here, you will get an 8
    /// byte alignment. If a global value is specified, and if that global has
    /// an explicit alignment requested, it will override the alignment request
    /// if required for correctness.
    ///
    void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;

    /// EmitBasicBlockStart - This method prints the label for the specified
    /// MachineBasicBlock, an alignment (if present) and a comment describing
    /// it if appropriate.
    void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;

    /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
    void EmitGlobalConstant(const Constant *CV, unsigned AddrSpace = 0);


    //===------------------------------------------------------------------===//
    // Overridable Hooks
    //===------------------------------------------------------------------===//

    // Targets can, or in the case of EmitInstruction, must implement these to
    // customize output.

    /// EmitStartOfAsmFile - This virtual method can be overridden by targets
    /// that want to emit something at the start of their file.
    virtual void EmitStartOfAsmFile(Module &) {}

    /// EmitEndOfAsmFile - This virtual method can be overridden by targets that
    /// want to emit something at the end of their file.
    virtual void EmitEndOfAsmFile(Module &) {}

    /// EmitFunctionBodyStart - Targets can override this to emit stuff before
    /// the first basic block in the function.
    virtual void EmitFunctionBodyStart() {}

    /// EmitFunctionBodyEnd - Targets can override this to emit stuff after
    /// the last basic block in the function.
    virtual void EmitFunctionBodyEnd() {}

    /// EmitInstruction - Targets should implement this to emit instructions.
    virtual void EmitInstruction(const MachineInstr *) {
      llvm_unreachable("EmitInstruction not implemented");
    }

    virtual void EmitFunctionEntryLabel();

    virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);

    /// EmitXXStructor - Targets can override this to change how global
    /// constants that are part of a C++ static/global constructor list are
    /// emitted.
    virtual void EmitXXStructor(const Constant *CV) {
      EmitGlobalConstant(CV);
    }

    /// isBlockOnlyReachableByFallthrough - Return true if the basic block has
    /// exactly one predecessor and the control transfer mechanism between
    /// the predecessor and this block is a fall-through.
    virtual bool
    isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;

    //===------------------------------------------------------------------===//
    // Symbol Lowering Routines.
    //===------------------------------------------------------------------===//
  public:

    /// GetTempSymbol - Return the MCSymbol corresponding to the assembler
    /// temporary label with the specified stem and unique ID.
    MCSymbol *GetTempSymbol(StringRef Name, unsigned ID) const;

    /// GetTempSymbol - Return an assembler temporary label with the specified
    /// stem.
    MCSymbol *GetTempSymbol(StringRef Name) const;


    /// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
    /// global value name as its base, with the specified suffix, and where the
    /// symbol is forced to have private linkage if ForcePrivate is true.
    MCSymbol *GetSymbolWithGlobalValueBase(const GlobalValue *GV,
                                           StringRef Suffix,
                                           bool ForcePrivate = true) const;

    /// GetExternalSymbolSymbol - Return the MCSymbol for the specified
    /// ExternalSymbol.
    MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;

    /// GetCPISymbol - Return the symbol for the specified constant pool entry.
    MCSymbol *GetCPISymbol(unsigned CPID) const;

    /// GetJTISymbol - Return the symbol for the specified jump table entry.
    MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;

    /// GetJTSetSymbol - Return the symbol for the specified jump table .set
    /// FIXME: privatize to AsmPrinter.
    MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;

    /// GetBlockAddressSymbol - Return the MCSymbol used to satisfy BlockAddress
    /// uses of the specified basic block.
    MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
    MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;

    //===------------------------------------------------------------------===//
    // Emission Helper Routines.
    //===------------------------------------------------------------------===//
  public:
    /// printOffset - This is just a convenient handler for printing offsets.
    void printOffset(int64_t Offset, raw_ostream &OS) const;

    /// EmitInt8 - Emit a byte directive and value.
    ///
    void EmitInt8(int Value) const;

    /// EmitInt16 - Emit a short directive and value.
    ///
    void EmitInt16(int Value) const;

    /// EmitInt32 - Emit a long directive and value.
    ///
    void EmitInt32(int Value) const;

    /// EmitLabelDifference - Emit something like ".long Hi-Lo" where the size
    /// in bytes of the directive is specified by Size and Hi/Lo specify the
    /// labels. This implicitly uses .set if it is available.
    void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
                             unsigned Size) const;

    /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
    /// where the size in bytes of the directive is specified by Size and Hi/Lo
    /// specify the labels. This implicitly uses .set if it is available.
    void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
                                   const MCSymbol *Lo, unsigned Size) const;

    /// EmitLabelPlusOffset - Emit something like ".long Label+Offset"
    /// where the size in bytes of the directive is specified by Size and Label
    /// specifies the label. This implicitly uses .set if it is available.
    void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
                             unsigned Size) const;

    /// EmitLabelReference - Emit something like ".long Label"
    /// where the size in bytes of the directive is specified by Size and Label
    /// specifies the label.
    void EmitLabelReference(const MCSymbol *Label, unsigned Size) const {
      EmitLabelPlusOffset(Label, 0, Size);
    }

    //===------------------------------------------------------------------===//
    // Dwarf Emission Helper Routines
    //===------------------------------------------------------------------===//

    /// EmitSLEB128 - emit the specified signed leb128 value.
    void EmitSLEB128(int Value, const char *Desc = 0) const;

    /// EmitULEB128 - emit the specified unsigned leb128 value.
    void EmitULEB128(unsigned Value, const char *Desc = 0,
                     unsigned PadTo = 0) const;

    /// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
    void EmitCFAByte(unsigned Val) const;

    /// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
    /// encoding. If verbose assembly output is enabled, we output comments
    /// describing the encoding. Desc is a string saying what the encoding is
    /// specifying (e.g. "LSDA").
    void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;

    /// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
    unsigned GetSizeOfEncodedValue(unsigned Encoding) const;

    /// EmitTTypeReference - Emit a reference to a ttype global with a specified encoding.
    void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;

    /// EmitSectionOffset - Emit the 4-byte offset of Label from the start of
    /// its section. This can be done with a special directive if the target
    /// supports it (e.g. cygwin) or by emitting it as an offset from a label at
    /// the start of the section.
    ///
    /// SectionLabel is a temporary label emitted at the start of the section
    /// that Label lives in.
    void EmitSectionOffset(const MCSymbol *Label,
                           const MCSymbol *SectionLabel) const;

    /// getDebugValueLocation - Get location information encoded by DBG_VALUE
    /// operands.
    virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;

    /// getISAEncoding - Get the value for DW_AT_APPLE_isa. Zero if no isa
    /// encoding specified.
    virtual unsigned getISAEncoding() { return 0; }

    /// EmitDwarfRegOp - Emit dwarf register operation.
    virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const;

    //===------------------------------------------------------------------===//
    // Dwarf Lowering Routines
    //===------------------------------------------------------------------===//

    /// EmitCFIFrameMove - Emit frame instruction to describe the layout of the
    /// frame.
    void EmitCFIFrameMove(const MachineMove &Move) const;

    //===------------------------------------------------------------------===//
    // Inline Asm Support
    //===------------------------------------------------------------------===//
  public:
    // These are hooks that targets can override to implement inline asm
    // support. These should probably be moved out of AsmPrinter someday.

    /// PrintSpecial - Print information related to the specified machine instr
    /// that is independent of the operand, and may be independent of the instr
    /// itself. This can be useful for portably encoding the comment character
    /// or other bits of target-specific knowledge into the asmstrings. The
    /// syntax used is ${:comment}. Targets can override this to add support
    /// for their own strange codes.
    virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
                              const char *Code) const;

    /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
    /// instruction, using the specified assembler variant. Targets should
    /// override this to format as appropriate. This method can return true if
    /// the operand is erroneous.
    virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                 unsigned AsmVariant, const char *ExtraCode,
                                 raw_ostream &OS);

    /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
    /// instruction, using the specified assembler variant as an address.
    /// Targets should override this to format as appropriate. This method can
    /// return true if the operand is erroneous.
    virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
                                       unsigned AsmVariant,
                                       const char *ExtraCode,
                                       raw_ostream &OS);

  private:
    /// Private state for PrintSpecial()
    // Assign a unique ID to this machine instruction.
    mutable const MachineInstr *LastMI;
    mutable unsigned LastFn;
    mutable unsigned Counter;
    mutable unsigned SetCounter;

    /// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
    void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0,
                       InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;

    /// EmitInlineAsm - This method formats and emits the specified machine
    /// instruction that is an inline asm.
    void EmitInlineAsm(const MachineInstr *MI) const;

    //===------------------------------------------------------------------===//
    // Internal Implementation Details
    //===------------------------------------------------------------------===//

    /// EmitVisibility - This emits visibility information about symbol, if
    /// this is supported by the target.
    void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
                        bool IsDefinition = true) const;

    void EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const;

    void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
                            const MachineBasicBlock *MBB,
                            unsigned uid) const;
    void EmitLLVMUsedList(const ConstantArray *InitList);
    void EmitXXStructorList(const Constant *List, bool isCtor);
    GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C);
  };
}

#endif
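For orientation, here is a minimal sketch of how a backend typically subclasses this interface. It is not part of the vendored header: the Foo target name is hypothetical, the MC lowering call is left as a comment because it is target-specific, and only the hooks declared above are real API.

// Illustrative sketch: the smallest useful AsmPrinter subclass.
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"

namespace {
class FooAsmPrinter : public llvm::AsmPrinter {
public:
  FooAsmPrinter(llvm::TargetMachine &TM, llvm::MCStreamer &Streamer)
      : AsmPrinter(TM, Streamer) {}

  virtual const char *getPassName() const {
    return "Foo Assembly Printer";
  }

  // Required hook: lower each MachineInstr to an MCInst and emit it.
  virtual void EmitInstruction(const llvm::MachineInstr *MI) {
    llvm::MCInst TmpInst;
    // LowerFooMachineInstrToMCInst(MI, TmpInst, *this); // target-specific, assumed
    OutStreamer.EmitInstruction(TmpInst);
  }
};
} // end anonymous namespace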
78
thirdparty/clang/include/llvm/CodeGen/CalcSpillWeights.h
vendored
Normal file
@@ -0,0 +1,78 @@
//===---------------- lib/CodeGen/CalcSpillWeights.h ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//


#ifndef LLVM_CODEGEN_CALCSPILLWEIGHTS_H
#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SlotIndexes.h"

namespace llvm {

  class LiveInterval;
  class LiveIntervals;
  class MachineLoopInfo;

  /// normalizeSpillWeight - The spill weight of a live interval is computed as:
  ///
  ///   (sum(use freq) + sum(def freq)) / (K + size)
  ///
  /// @param UseDefFreq Expected number of executed use and def instructions
  ///                   per function call. Derived from block frequencies.
  /// @param Size       Size of live interval as returned by getSize()
  ///
  static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size) {
    // The constant 25 instructions is added to avoid depending too much on
    // accidental SlotIndex gaps for small intervals. The effect is that small
    // intervals have a spill weight that is mostly proportional to the number
    // of uses, while large intervals get a spill weight that is closer to a use
    // density.
    return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
  }

  /// VirtRegAuxInfo - Calculate auxiliary information for a virtual
  /// register such as its spill weight and allocation hint.
  class VirtRegAuxInfo {
    MachineFunction &MF;
    LiveIntervals &LIS;
    const MachineLoopInfo &Loops;
    DenseMap<unsigned, float> Hint;
  public:
    VirtRegAuxInfo(MachineFunction &mf, LiveIntervals &lis,
                   const MachineLoopInfo &loops) :
      MF(mf), LIS(lis), Loops(loops) {}

    /// CalculateWeightAndHint - (re)compute li's spill weight and allocation
    /// hint.
    void CalculateWeightAndHint(LiveInterval &li);
  };

  /// CalculateSpillWeights - Compute spill weights for all virtual register
  /// live intervals.
  class CalculateSpillWeights : public MachineFunctionPass {
  public:
    static char ID;

    CalculateSpillWeights() : MachineFunctionPass(ID) {
      initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
    }

    virtual void getAnalysisUsage(AnalysisUsage &au) const;

    virtual bool runOnMachineFunction(MachineFunction &fn);

  private:
    /// Returns true if the given live interval is zero length.
    bool isZeroLengthInterval(LiveInterval *li) const;
  };

}

#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H
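A small sketch, not part of the vendored header, of how the normalization above behaves; the frequencies and sizes are made-up numbers and spillWeightExample is an illustrative name.

// Illustrative sketch: normalizeSpillWeight turns raw use/def frequencies into
// per-instruction densities, with a fixed bias so tiny intervals do not get an
// artificially huge weight just because their size happens to be small.
#include "llvm/CodeGen/CalcSpillWeights.h"

static void spillWeightExample() {
  using namespace llvm;
  // Short interval with few uses vs. long interval with many uses.
  float Short = normalizeSpillWeight(/*UseDefFreq=*/2.0f,
                                     /*Size=*/4 * SlotIndex::InstrDist);
  float Long  = normalizeSpillWeight(/*UseDefFreq=*/200.0f,
                                     /*Size=*/1000 * SlotIndex::InstrDist);
  // Both values are now comparable densities rather than raw counts.
  (void)Short;
  (void)Long;
}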
327
thirdparty/clang/include/llvm/CodeGen/CallingConvLower.h
vendored
Normal file
@@ -0,0 +1,327 @@
//===-- llvm/CallingConvLower.h - Calling Conventions -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the CCState and CCValAssign classes, used for lowering
// and implementing calling conventions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
#define LLVM_CODEGEN_CALLINGCONVLOWER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetCallingConv.h"

namespace llvm {
class TargetRegisterInfo;
class TargetMachine;
class CCState;

/// CCValAssign - Represent assignment of one arg/retval to a location.
class CCValAssign {
public:
  enum LocInfo {
    Full,      // The value fills the full location.
    SExt,      // The value is sign extended in the location.
    ZExt,      // The value is zero extended in the location.
    AExt,      // The value is extended with undefined upper bits.
    BCvt,      // The value is bit-converted in the location.
    VExt,      // The value is vector-widened in the location.
               // FIXME: Not implemented yet. Code that uses AExt to mean
               // vector-widen should be fixed to use VExt instead.
    Indirect   // The location contains pointer to the value.
    // TODO: a subset of the value is in the location.
  };
private:
  /// ValNo - This is the value number being assigned (e.g. an argument number).
  unsigned ValNo;

  /// Loc is either a stack offset or a register number.
  unsigned Loc;

  /// isMem - True if this is a memory loc, false if it is a register loc.
  unsigned isMem : 1;

  /// isCustom - True if this arg/retval requires special handling.
  unsigned isCustom : 1;

  /// Information about how the value is assigned.
  LocInfo HTP : 6;

  /// ValVT - The type of the value being assigned.
  MVT ValVT;

  /// LocVT - The type of the location being assigned to.
  MVT LocVT;
public:

  static CCValAssign getReg(unsigned ValNo, MVT ValVT,
                            unsigned RegNo, MVT LocVT,
                            LocInfo HTP) {
    CCValAssign Ret;
    Ret.ValNo = ValNo;
    Ret.Loc = RegNo;
    Ret.isMem = false;
    Ret.isCustom = false;
    Ret.HTP = HTP;
    Ret.ValVT = ValVT;
    Ret.LocVT = LocVT;
    return Ret;
  }

  static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
                                  unsigned RegNo, MVT LocVT,
                                  LocInfo HTP) {
    CCValAssign Ret;
    Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
    Ret.isCustom = true;
    return Ret;
  }

  static CCValAssign getMem(unsigned ValNo, MVT ValVT,
                            unsigned Offset, MVT LocVT,
                            LocInfo HTP) {
    CCValAssign Ret;
    Ret.ValNo = ValNo;
    Ret.Loc = Offset;
    Ret.isMem = true;
    Ret.isCustom = false;
    Ret.HTP = HTP;
    Ret.ValVT = ValVT;
    Ret.LocVT = LocVT;
    return Ret;
  }

  static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
                                  unsigned Offset, MVT LocVT,
                                  LocInfo HTP) {
    CCValAssign Ret;
    Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
    Ret.isCustom = true;
    return Ret;
  }

  unsigned getValNo() const { return ValNo; }
  MVT getValVT() const { return ValVT; }

  bool isRegLoc() const { return !isMem; }
  bool isMemLoc() const { return isMem; }

  bool needsCustom() const { return isCustom; }

  unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
  unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
  MVT getLocVT() const { return LocVT; }

  LocInfo getLocInfo() const { return HTP; }
  bool isExtInLoc() const {
    return (HTP == AExt || HTP == SExt || HTP == ZExt);
  }

};

/// CCAssignFn - This function assigns a location for Val, updating State to
/// reflect the change. It returns 'true' if it failed to handle Val.
typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
                        MVT LocVT, CCValAssign::LocInfo LocInfo,
                        ISD::ArgFlagsTy ArgFlags, CCState &State);

/// CCCustomFn - This function assigns a location for Val, possibly updating
/// all args to reflect changes and indicates if it handled it. It must set
/// isCustom if it handles the arg and returns true.
typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
                        MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                        ISD::ArgFlagsTy &ArgFlags, CCState &State);

/// ParmContext - This enum tracks whether calling convention lowering is in
/// the context of prologue or call generation. Not all backends make use of
/// this information.
typedef enum { Unknown, Prologue, Call } ParmContext;

/// CCState - This class holds information needed while lowering arguments and
/// return values. It captures which registers are already assigned and which
/// stack slots are used. It provides accessors to allocate these values.
class CCState {
private:
  CallingConv::ID CallingConv;
  bool IsVarArg;
  MachineFunction &MF;
  const TargetMachine &TM;
  const TargetRegisterInfo &TRI;
  SmallVector<CCValAssign, 16> &Locs;
  LLVMContext &Context;

  unsigned StackOffset;
  SmallVector<uint32_t, 16> UsedRegs;
  unsigned FirstByValReg;
  bool FirstByValRegValid;

protected:
  ParmContext CallOrPrologue;

public:
  CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
          const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
          LLVMContext &C);

  void addLoc(const CCValAssign &V) {
    Locs.push_back(V);
  }

  LLVMContext &getContext() const { return Context; }
  const TargetMachine &getTarget() const { return TM; }
  MachineFunction &getMachineFunction() const { return MF; }
  CallingConv::ID getCallingConv() const { return CallingConv; }
  bool isVarArg() const { return IsVarArg; }

  unsigned getNextStackOffset() const { return StackOffset; }

  /// isAllocated - Return true if the specified register (or an alias) is
  /// allocated.
  bool isAllocated(unsigned Reg) const {
    return UsedRegs[Reg/32] & (1 << (Reg&31));
  }

  /// AnalyzeFormalArguments - Analyze an array of argument values,
  /// incorporating info about the formals into this state.
  void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                              CCAssignFn Fn);

  /// AnalyzeReturn - Analyze the returned values of a return,
  /// incorporating info about the result values into this state.
  void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                     CCAssignFn Fn);

  /// CheckReturn - Analyze the return values of a function, returning
  /// true if the return can be performed without sret-demotion, and
  /// false otherwise.
  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
                   CCAssignFn Fn);

  /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
  /// incorporating info about the passed values into this state.
  void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                           CCAssignFn Fn);

  /// AnalyzeCallOperands - Same as above except it takes vectors of types
  /// and argument flags.
  void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                           SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                           CCAssignFn Fn);

  /// AnalyzeCallResult - Analyze the return values of a call,
  /// incorporating info about the passed values into this state.
  void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                         CCAssignFn Fn);

  /// AnalyzeCallResult - Same as above except it's specialized for calls which
  /// produce a single value.
  void AnalyzeCallResult(MVT VT, CCAssignFn Fn);

  /// getFirstUnallocated - Return the first unallocated register in the set, or
  /// NumRegs if they are all allocated.
  unsigned getFirstUnallocated(const uint16_t *Regs, unsigned NumRegs) const {
    for (unsigned i = 0; i != NumRegs; ++i)
      if (!isAllocated(Regs[i]))
        return i;
    return NumRegs;
  }

  /// AllocateReg - Attempt to allocate one register. If it is not available,
  /// return zero. Otherwise, return the register, marking it and any aliases
  /// as allocated.
  unsigned AllocateReg(unsigned Reg) {
    if (isAllocated(Reg)) return 0;
    MarkAllocated(Reg);
    return Reg;
  }

  /// Version of AllocateReg with extra register to be shadowed.
  unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
    if (isAllocated(Reg)) return 0;
    MarkAllocated(Reg);
    MarkAllocated(ShadowReg);
    return Reg;
  }

  /// AllocateReg - Attempt to allocate one of the specified registers. If none
  /// are available, return zero. Otherwise, return the first one available,
  /// marking it and any aliases as allocated.
  unsigned AllocateReg(const uint16_t *Regs, unsigned NumRegs) {
    unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
    if (FirstUnalloc == NumRegs)
      return 0;    // Didn't find the reg.

    // Mark the register and any aliases as allocated.
    unsigned Reg = Regs[FirstUnalloc];
    MarkAllocated(Reg);
    return Reg;
  }

  /// Version of AllocateReg with list of registers to be shadowed.
  unsigned AllocateReg(const uint16_t *Regs, const uint16_t *ShadowRegs,
                       unsigned NumRegs) {
    unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
    if (FirstUnalloc == NumRegs)
      return 0;    // Didn't find the reg.

    // Mark the register and any aliases as allocated.
    unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
    MarkAllocated(Reg);
    MarkAllocated(ShadowReg);
    return Reg;
  }

  /// AllocateStack - Allocate a chunk of stack space with the specified size
  /// and alignment.
  unsigned AllocateStack(unsigned Size, unsigned Align) {
    assert(Align && ((Align-1) & Align) == 0); // Align is power of 2.
    StackOffset = ((StackOffset + Align-1) & ~(Align-1));
    unsigned Result = StackOffset;
    StackOffset += Size;
    MF.getFrameInfo()->ensureMaxAlignment(Align);
    return Result;
  }

  /// Version of AllocateStack with extra register to be shadowed.
  unsigned AllocateStack(unsigned Size, unsigned Align, unsigned ShadowReg) {
    MarkAllocated(ShadowReg);
    return AllocateStack(Size, Align);
  }

  // HandleByVal - Allocate a stack slot large enough to pass an argument by
  // value. The size and alignment information of the argument is encoded in its
  // parameter attribute.
  void HandleByVal(unsigned ValNo, MVT ValVT,
                   MVT LocVT, CCValAssign::LocInfo LocInfo,
                   int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);

  // First GPR that carries part of a byval aggregate that's split
  // between registers and memory.
  unsigned getFirstByValReg() const { return FirstByValRegValid ? FirstByValReg : 0; }
  void setFirstByValReg(unsigned r) { FirstByValReg = r; FirstByValRegValid = true; }
  void clearFirstByValReg() { FirstByValReg = 0; FirstByValRegValid = false; }
  bool isFirstByValRegValid() const { return FirstByValRegValid; }

  ParmContext getCallOrPrologue() const { return CallOrPrologue; }

private:
  /// MarkAllocated - Mark a register and all of its aliases as allocated.
  void MarkAllocated(unsigned Reg);
};



} // end namespace llvm

#endif
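The align-up bit trick inside AllocateStack is worth seeing in isolation. The sketch below is not part of the vendored header; it restates the same arithmetic as a standalone function so it can be checked by hand, and alignUpExample is an illustrative name.

// Illustrative sketch: the stack-slot allocation arithmetic from AllocateStack.
#include <cassert>

static unsigned alignUpExample(unsigned StackOffset, unsigned Size, unsigned Align) {
  assert(Align && ((Align - 1) & Align) == 0 && "Align must be a power of 2");
  // Round the running offset up to the next multiple of Align, then claim
  // Size bytes starting there. E.g. offset 20 with Align 8 yields a slot at 24.
  StackOffset = (StackOffset + Align - 1) & ~(Align - 1);
  unsigned Result = StackOffset;
  StackOffset += Size; // where the next allocation would start
  return Result;       // the start offset of the newly allocated slot
}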
227
thirdparty/clang/include/llvm/CodeGen/CommandFlags.h
vendored
Normal file
@@ -0,0 +1,227 @@
//===-- CommandFlags.h - Command Line Flags Interface -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains codegen-specific flags that are shared between different
// command line tools. The tools "llc" and "opt" both use this file to prevent
// flag duplication.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_COMMANDFLAGS_H
#define LLVM_CODEGEN_COMMANDFLAGS_H

#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include <string>
using namespace llvm;

cl::opt<std::string>
MArch("march", cl::desc("Architecture to generate code for (see --version)"));

cl::opt<std::string>
MCPU("mcpu",
     cl::desc("Target a specific cpu type (-mcpu=help for details)"),
     cl::value_desc("cpu-name"),
     cl::init(""));

cl::list<std::string>
MAttrs("mattr",
       cl::CommaSeparated,
       cl::desc("Target specific attributes (-mattr=help for details)"),
       cl::value_desc("a1,+a2,-a3,..."));

cl::opt<Reloc::Model>
RelocModel("relocation-model",
           cl::desc("Choose relocation model"),
           cl::init(Reloc::Default),
           cl::values(
              clEnumValN(Reloc::Default, "default",
                         "Target default relocation model"),
              clEnumValN(Reloc::Static, "static",
                         "Non-relocatable code"),
              clEnumValN(Reloc::PIC_, "pic",
                         "Fully relocatable, position independent code"),
              clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
                         "Relocatable external references, non-relocatable code"),
              clEnumValEnd));

cl::opt<llvm::CodeModel::Model>
CMModel("code-model",
        cl::desc("Choose code model"),
        cl::init(CodeModel::Default),
        cl::values(clEnumValN(CodeModel::Default, "default",
                              "Target default code model"),
                   clEnumValN(CodeModel::Small, "small",
                              "Small code model"),
                   clEnumValN(CodeModel::Kernel, "kernel",
                              "Kernel code model"),
                   clEnumValN(CodeModel::Medium, "medium",
                              "Medium code model"),
                   clEnumValN(CodeModel::Large, "large",
                              "Large code model"),
                   clEnumValEnd));

cl::opt<bool>
RelaxAll("mc-relax-all",
         cl::desc("When used with filetype=obj, "
                  "relax all fixups in the emitted object file"));

cl::opt<TargetMachine::CodeGenFileType>
FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
  cl::desc("Choose a file type (not all types are supported by all targets):"),
  cl::values(
             clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
                        "Emit an assembly ('.s') file"),
             clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
                        "Emit a native object ('.o') file"),
             clEnumValN(TargetMachine::CGFT_Null, "null",
                        "Emit nothing, for performance testing"),
             clEnumValEnd));

cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
                            cl::desc("Do not use .loc entries"));

cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
                         cl::desc("Do not use .cfi_* directives"));

cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
                  cl::desc("Use .file directives with an explicit directory."));

cl::opt<bool>
DisableRedZone("disable-red-zone",
               cl::desc("Do not emit code that uses the red zone."),
               cl::init(false));

cl::opt<bool>
EnableFPMAD("enable-fp-mad",
            cl::desc("Enable less precise MAD instructions to be generated"),
            cl::init(false));

cl::opt<bool>
DisableFPElim("disable-fp-elim",
              cl::desc("Disable frame pointer elimination optimization"),
              cl::init(false));

cl::opt<bool>
DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
  cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
  cl::init(false));

cl::opt<bool>
EnableUnsafeFPMath("enable-unsafe-fp-math",
                   cl::desc("Enable optimizations that may decrease FP precision"),
                   cl::init(false));

cl::opt<bool>
EnableNoInfsFPMath("enable-no-infs-fp-math",
                   cl::desc("Enable FP math optimizations that assume no +-Infs"),
                   cl::init(false));

cl::opt<bool>
EnableNoNaNsFPMath("enable-no-nans-fp-math",
                   cl::desc("Enable FP math optimizations that assume no NaNs"),
                   cl::init(false));

cl::opt<bool>
EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
      cl::Hidden,
      cl::desc("Force codegen to assume rounding mode can change dynamically"),
      cl::init(false));

cl::opt<bool>
GenerateSoftFloatCalls("soft-float",
                       cl::desc("Generate software floating point library calls"),
                       cl::init(false));

cl::opt<llvm::FloatABI::ABIType>
FloatABIForCalls("float-abi",
                 cl::desc("Choose float ABI type"),
                 cl::init(FloatABI::Default),
                 cl::values(
                     clEnumValN(FloatABI::Default, "default",
                                "Target default float ABI type"),
                     clEnumValN(FloatABI::Soft, "soft",
                                "Soft float ABI (implied by -soft-float)"),
                     clEnumValN(FloatABI::Hard, "hard",
                                "Hard float ABI (uses FP registers)"),
                     clEnumValEnd));

cl::opt<llvm::FPOpFusion::FPOpFusionMode>
FuseFPOps("fp-contract",
          cl::desc("Enable aggressive formation of fused FP ops"),
          cl::init(FPOpFusion::Standard),
          cl::values(
              clEnumValN(FPOpFusion::Fast, "fast",
                         "Fuse FP ops whenever profitable"),
              clEnumValN(FPOpFusion::Standard, "on",
                         "Only fuse 'blessed' FP ops."),
              clEnumValN(FPOpFusion::Strict, "off",
                         "Only fuse FP ops when the result won't be affected."),
              clEnumValEnd));

cl::opt<bool>
DontPlaceZerosInBSS("nozero-initialized-in-bss",
                    cl::desc("Don't place zero-initialized symbols into bss section"),
                    cl::init(false));

cl::opt<bool>
EnableGuaranteedTailCallOpt("tailcallopt",
  cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
  cl::init(false));

cl::opt<bool>
DisableTailCalls("disable-tail-calls",
                 cl::desc("Never emit tail calls"),
                 cl::init(false));

cl::opt<unsigned>
OverrideStackAlignment("stack-alignment",
                       cl::desc("Override default stack alignment"),
                       cl::init(0));

cl::opt<bool>
EnableRealignStack("realign-stack",
                   cl::desc("Realign stack if needed"),
                   cl::init(true));

cl::opt<std::string>
TrapFuncName("trap-func", cl::Hidden,
             cl::desc("Emit a call to trap function rather than a trap instruction"),
             cl::init(""));

cl::opt<bool>
EnablePIE("enable-pie",
          cl::desc("Assume the creation of a position independent executable."),
          cl::init(false));

cl::opt<bool>
SegmentedStacks("segmented-stacks",
                cl::desc("Use segmented stacks if possible."),
                cl::init(false));

cl::opt<bool>
UseInitArray("use-init-array",
             cl::desc("Use .init_array instead of .ctors."),
             cl::init(false));

cl::opt<std::string> StopAfter("stop-after",
                               cl::desc("Stop compilation after a specific pass"),
                               cl::value_desc("pass-name"),
                               cl::init(""));
cl::opt<std::string> StartAfter("start-after",
                                cl::desc("Resume compilation after a specific pass"),
                                cl::value_desc("pass-name"),
                                cl::init(""));

cl::opt<unsigned>
SSPBufferSize("stack-protector-buffer-size", cl::init(8),
              cl::desc("Lower bound for a buffer to be considered for "
                       "stack protection"));
#endif
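A hedged sketch of how a tool consumes this header: it parses the command line and then reads the cl::opt globals declared above. The TargetOptions field names below mirror how llc wires these flags up and are the main assumption here.

// Illustrative sketch: a minimal tool using the shared codegen flags.
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Target/TargetOptions.h"

int main(int argc, char **argv) {
  // Registers and parses every flag declared in CommandFlags.h.
  llvm::cl::ParseCommandLineOptions(argc, argv, "codegen flags demo\n");

  llvm::TargetOptions Options;
  Options.UnsafeFPMath = EnableUnsafeFPMath;       // -enable-unsafe-fp-math
  Options.NoInfsFPMath = EnableNoInfsFPMath;       // -enable-no-infs-fp-math
  Options.NoNaNsFPMath = EnableNoNaNsFPMath;       // -enable-no-nans-fp-math
  Options.DisableTailCalls = DisableTailCalls;     // -disable-tail-calls
  return 0;
}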
25
thirdparty/clang/include/llvm/CodeGen/DAGCombine.h
vendored
Normal file
@@ -0,0 +1,25 @@
//===-- llvm/CodeGen/DAGCombine.h  ------- SelectionDAG Nodes ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_CODEGEN_DAGCOMBINE_H
#define LLVM_CODEGEN_DAGCOMBINE_H

namespace llvm {

enum CombineLevel {
  BeforeLegalizeTypes,
  AfterLegalizeTypes,
  AfterLegalizeVectorOps,
  AfterLegalizeDAG
};

} // end llvm namespace

#endif
167
thirdparty/clang/include/llvm/CodeGen/DFAPacketizer.h
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
//=- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-=====//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
// This class implements a deterministic finite automaton (DFA) based
|
||||
// packetizing mechanism for VLIW architectures. It provides APIs to
|
||||
// determine whether there exists a legal mapping of instructions to
|
||||
// functional unit assignments in a packet. The DFA is auto-generated from
|
||||
// the target's Schedule.td file.
|
||||
//
|
||||
// A DFA consists of 3 major elements: states, inputs, and transitions. For
|
||||
// the packetizing mechanism, the input is the set of instruction classes for
|
||||
// a target. The state models all possible combinations of functional unit
|
||||
// consumption for a given set of instructions in a packet. A transition
|
||||
// models the addition of an instruction to a packet. In the DFA constructed
|
||||
// by this class, if an instruction can be added to a packet, then a valid
|
||||
// transition exists from the corresponding state. Invalid transitions
|
||||
// indicate that the instruction cannot be added to the current packet.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
|
||||
#define LLVM_CODEGEN_DFAPACKETIZER_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include <map>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MCInstrDesc;
|
||||
class MachineInstr;
|
||||
class MachineLoopInfo;
|
||||
class MachineDominatorTree;
|
||||
class InstrItineraryData;
|
||||
class DefaultVLIWScheduler;
|
||||
class SUnit;
|
||||
|
||||
class DFAPacketizer {
|
||||
private:
|
||||
typedef std::pair<unsigned, unsigned> UnsignPair;
|
||||
const InstrItineraryData *InstrItins;
|
||||
int CurrentState;
|
||||
const int (*DFAStateInputTable)[2];
|
||||
const unsigned *DFAStateEntryTable;
|
||||
|
||||
// CachedTable is a map from <FromState, Input> to ToState.
|
||||
DenseMap<UnsignPair, unsigned> CachedTable;
|
||||
|
||||
// ReadTable - Read the DFA transition table and update CachedTable.
|
||||
void ReadTable(unsigned int state);
|
||||
|
||||
public:
|
||||
DFAPacketizer(const InstrItineraryData *I, const int (*SIT)[2],
|
||||
const unsigned *SET);
|
||||
|
||||
// Reset the current state to make all resources available.
|
||||
void clearResources() {
|
||||
CurrentState = 0;
|
||||
}
|
||||
|
||||
// canReserveResources - Check if the resources occupied by a MCInstrDesc
|
||||
// are available in the current state.
|
||||
bool canReserveResources(const llvm::MCInstrDesc *MID);
|
||||
|
||||
// reserveResources - Reserve the resources occupied by a MCInstrDesc and
|
||||
// change the current state to reflect that change.
|
||||
void reserveResources(const llvm::MCInstrDesc *MID);
|
||||
|
||||
// canReserveResources - Check if the resources occupied by a machine
|
||||
// instruction are available in the current state.
|
||||
bool canReserveResources(llvm::MachineInstr *MI);
|
||||
|
||||
// reserveResources - Reserve the resources occupied by a machine
|
||||
// instruction and change the current state to reflect that change.
|
||||
void reserveResources(llvm::MachineInstr *MI);
|
||||
|
||||
const InstrItineraryData *getInstrItins() const { return InstrItins; }
|
||||
};
|
||||
|
||||
// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
|
||||
// packetizer works on machine basic blocks. For each instruction I in BB, the
|
||||
// packetizer consults the DFA to see if machine resources are available to
|
||||
// execute I. If so, the packetizer checks if I depends on any instruction J in
|
||||
// the current packet. If no dependency is found, I is added to current packet
|
||||
// and machine resource is marked as taken. If any dependency is found, a target
|
||||
// API call is made to prune the dependence.
|
||||
class VLIWPacketizerList {
|
||||
protected:
|
||||
const TargetMachine &TM;
|
||||
const MachineFunction &MF;
|
||||
const TargetInstrInfo *TII;
|
||||
|
||||
// The VLIW Scheduler.
|
||||
DefaultVLIWScheduler *VLIWScheduler;
|
||||
|
||||
// Vector of instructions assigned to the current packet.
|
||||
std::vector<MachineInstr*> CurrentPacketMIs;
|
||||
// DFA resource tracker.
|
||||
DFAPacketizer *ResourceTracker;
|
||||
|
||||
// Generate MI -> SU map.
|
||||
std::map<MachineInstr*, SUnit*> MIToSUnit;
|
||||
|
||||
public:
|
||||
VLIWPacketizerList(
|
||||
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
|
||||
bool IsPostRA);
|
||||
|
||||
virtual ~VLIWPacketizerList();
|
||||
|
||||
// PacketizeMIs - Implement this API in the backend to bundle instructions.
|
||||
void PacketizeMIs(MachineBasicBlock *MBB,
|
||||
MachineBasicBlock::iterator BeginItr,
|
||||
MachineBasicBlock::iterator EndItr);
|
||||
|
||||
// getResourceTracker - return ResourceTracker
|
||||
DFAPacketizer *getResourceTracker() {return ResourceTracker;}
|
||||
|
||||
// addToPacket - Add MI to the current packet.
|
||||
virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
|
||||
MachineBasicBlock::iterator MII = MI;
|
||||
CurrentPacketMIs.push_back(MI);
|
||||
ResourceTracker->reserveResources(MI);
|
||||
return MII;
|
||||
}
|
||||
|
||||
// endPacket - End the current packet.
|
||||
void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
|
||||
|
||||
// initPacketizerState - perform initialization before packetizing
|
||||
// an instruction. This function is supposed to be overrided by
|
||||
// the target dependent packetizer.
|
||||
virtual void initPacketizerState() { return; }
|
||||
|
||||
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
|
||||
virtual bool ignorePseudoInstruction(MachineInstr *I,
|
||||
MachineBasicBlock *MBB) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// isSoloInstruction - return true if instruction MI can not be packetized
|
||||
// with any other instruction, which means that MI itself is a packet.
|
||||
virtual bool isSoloInstruction(MachineInstr *MI) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
|
||||
// together.
|
||||
virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// isLegalToPruneDependencies - Is it legal to prune dependence between SUI
|
||||
// and SUJ.
|
||||
virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
|
||||
return false;
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
#endif
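The two classes above are intended to be used together: a backend derives from VLIWPacketizerList and lets the inherited ResourceTracker (a DFAPacketizer) answer the resource questions, while the virtual hooks encode target policy. The sketch below is illustrative only; MyTargetPacketizer, its inline-asm policy, and its dependence check are hypothetical placeholders, not part of this header or of any real target.

#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/ScheduleDAG.h"

namespace {
// Hypothetical target packetizer; only a few hooks are overridden.
class MyTargetPacketizer : public llvm::VLIWPacketizerList {
public:
  MyTargetPacketizer(llvm::MachineFunction &MF, llvm::MachineLoopInfo &MLI,
                     llvm::MachineDominatorTree &MDT)
      : VLIWPacketizerList(MF, MLI, MDT, /*IsPostRA=*/true) {}

  // Instructions the target refuses to bundle become single-instruction packets.
  virtual bool isSoloInstruction(llvm::MachineInstr *MI) {
    return MI->isInlineAsm();                   // placeholder policy
  }

  // Resource legality is delegated to the DFA; only dependences are checked here.
  virtual bool isLegalToPacketizeTogether(llvm::SUnit *SUI, llvm::SUnit *SUJ) {
    return !SUJ->isSucc(SUI);                   // placeholder dependence test
  }

  // Same behaviour as the base class, written out to show the DFA calls.
  virtual llvm::MachineBasicBlock::iterator addToPacket(llvm::MachineInstr *MI) {
    llvm::MachineBasicBlock::iterator MII = MI;
    CurrentPacketMIs.push_back(MI);
    ResourceTracker->reserveResources(MI);      // consume functional units for MI
    return MII;
  }
};
} // anonymous namespace

PacketizeMIs then drives these hooks over each basic block range handed to it.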
69
thirdparty/clang/include/llvm/CodeGen/EdgeBundles.h
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
//===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// The EdgeBundles analysis forms equivalence classes of CFG edges such that all
|
||||
// edges leaving a machine basic block are in the same bundle, and all edges
|
||||
// entering a machine basic block are in the same bundle.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
|
||||
#define LLVM_CODEGEN_EDGEBUNDLES_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/IntEqClasses.h"
|
||||
#include "llvm/ADT/Twine.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class EdgeBundles : public MachineFunctionPass {
|
||||
const MachineFunction *MF;
|
||||
|
||||
/// EC - Each edge bundle is an equivalence class. The keys are:
|
||||
/// 2*BB->getNumber() -> Ingoing bundle.
|
||||
/// 2*BB->getNumber()+1 -> Outgoing bundle.
|
||||
IntEqClasses EC;
|
||||
|
||||
/// Blocks - Map each bundle to a list of basic block numbers.
|
||||
SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
|
||||
|
||||
public:
|
||||
static char ID;
|
||||
EdgeBundles() : MachineFunctionPass(ID) {}
|
||||
|
||||
/// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
|
||||
/// bundle number for basic block #N
|
||||
unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }
|
||||
|
||||
/// getNumBundles - Return the total number of bundles in the CFG.
|
||||
unsigned getNumBundles() const { return EC.getNumClasses(); }
|
||||
|
||||
/// getBlocks - Return an array of blocks that are connected to Bundle.
|
||||
ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
|
||||
|
||||
/// getMachineFunction - Return the last machine function computed.
|
||||
const MachineFunction *getMachineFunction() const { return MF; }
|
||||
|
||||
/// view - Visualize the annotated bipartite CFG with Graphviz.
|
||||
void view() const;
|
||||
|
||||
private:
|
||||
virtual bool runOnMachineFunction(MachineFunction&);
|
||||
virtual void getAnalysisUsage(AnalysisUsage&) const;
|
||||
};
|
||||
|
||||
/// Specialize WriteGraph, the standard implementation won't work.
|
||||
raw_ostream &WriteGraph(raw_ostream &O, const EdgeBundles &G,
|
||||
bool ShortNames = false,
|
||||
const Twine &Title = "");
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
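To make the query interface above concrete, the sketch below (not part of the header) reports the bundle numbers seen by every block of a machine function; the helper name dumpBundles is hypothetical. Inside a pass, the analysis itself would come from getAnalysis<EdgeBundles>() after declaring it with AU.addRequired<EdgeBundles>().

#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print the ingoing/outgoing bundle of every block.
static void dumpBundles(const llvm::EdgeBundles &Bundles,
                        const llvm::MachineFunction &MF) {
  for (llvm::MachineFunction::const_iterator I = MF.begin(), E = MF.end();
       I != E; ++I) {
    unsigned N = I->getNumber();
    unsigned In  = Bundles.getBundle(N, /*Out=*/false);  // edges entering #N
    unsigned Out = Bundles.getBundle(N, /*Out=*/true);   // edges leaving #N
    llvm::errs() << "BB#" << N << " in-bundle " << In
                 << " out-bundle " << Out << "\n";
  }
}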
432
thirdparty/clang/include/llvm/CodeGen/FastISel.h
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
//===-- FastISel.h - Definition of the FastISel class ---------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the FastISel class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_FASTISEL_H
|
||||
#define LLVM_CODEGEN_FASTISEL_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/ValueTypes.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class AllocaInst;
|
||||
class Constant;
|
||||
class ConstantFP;
|
||||
class FunctionLoweringInfo;
|
||||
class Instruction;
|
||||
class LoadInst;
|
||||
class MachineBasicBlock;
|
||||
class MachineConstantPool;
|
||||
class MachineFunction;
|
||||
class MachineInstr;
|
||||
class MachineFrameInfo;
|
||||
class MachineRegisterInfo;
|
||||
class DataLayout;
|
||||
class TargetInstrInfo;
|
||||
class TargetLibraryInfo;
|
||||
class TargetLowering;
|
||||
class TargetMachine;
|
||||
class TargetRegisterClass;
|
||||
class TargetRegisterInfo;
|
||||
class User;
|
||||
class Value;
|
||||
|
||||
/// FastISel - This is a fast-path instruction selection class that
|
||||
/// generates poor code and doesn't support illegal types or non-trivial
|
||||
/// lowering, but runs quickly.
|
||||
class FastISel {
|
||||
protected:
|
||||
DenseMap<const Value *, unsigned> LocalValueMap;
|
||||
FunctionLoweringInfo &FuncInfo;
|
||||
MachineRegisterInfo &MRI;
|
||||
MachineFrameInfo &MFI;
|
||||
MachineConstantPool &MCP;
|
||||
DebugLoc DL;
|
||||
const TargetMachine &TM;
|
||||
const DataLayout &TD;
|
||||
const TargetInstrInfo &TII;
|
||||
const TargetLowering &TLI;
|
||||
const TargetRegisterInfo &TRI;
|
||||
const TargetLibraryInfo *LibInfo;
|
||||
|
||||
/// The position of the last instruction for materializing constants
|
||||
/// for use in the current block. It resets to EmitStartPt when it
|
||||
/// makes sense (for example, it's usually profitable to avoid function
|
||||
/// calls between the definition and the use)
|
||||
MachineInstr *LastLocalValue;
|
||||
|
||||
/// The top most instruction in the current block that is allowed for
|
||||
/// emitting local variables. LastLocalValue resets to EmitStartPt when
|
||||
/// it makes sense (for example, on function calls)
|
||||
MachineInstr *EmitStartPt;
|
||||
|
||||
public:
|
||||
/// getLastLocalValue - Return the position of the last instruction
|
||||
/// emitted for materializing constants for use in the current block.
|
||||
MachineInstr *getLastLocalValue() { return LastLocalValue; }
|
||||
|
||||
/// setLastLocalValue - Update the position of the last instruction
|
||||
/// emitted for materializing constants for use in the current block.
|
||||
void setLastLocalValue(MachineInstr *I) {
|
||||
EmitStartPt = I;
|
||||
LastLocalValue = I;
|
||||
}
|
||||
|
||||
/// startNewBlock - Set the current block to which generated machine
|
||||
/// instructions will be appended, and clear the local CSE map.
|
||||
///
|
||||
void startNewBlock();
|
||||
|
||||
/// getCurDebugLoc() - Return current debug location information.
|
||||
DebugLoc getCurDebugLoc() const { return DL; }
|
||||
|
||||
/// LowerArguments - Do "fast" instruction selection for function arguments
|
||||
/// and append machine instructions to the current block. Return true if
|
||||
/// it is successful.
|
||||
bool LowerArguments();
|
||||
|
||||
/// SelectInstruction - Do "fast" instruction selection for the given
|
||||
/// LLVM IR instruction, and append generated machine instructions to
|
||||
/// the current block. Return true if selection was successful.
|
||||
///
|
||||
bool SelectInstruction(const Instruction *I);
|
||||
|
||||
/// SelectOperator - Do "fast" instruction selection for the given
|
||||
/// LLVM IR operator (Instruction or ConstantExpr), and append
|
||||
/// generated machine instructions to the current block. Return true
|
||||
/// if selection was successful.
|
||||
///
|
||||
bool SelectOperator(const User *I, unsigned Opcode);
|
||||
|
||||
/// getRegForValue - Create a virtual register and arrange for it to
|
||||
/// be assigned the value for the given LLVM value.
|
||||
unsigned getRegForValue(const Value *V);
|
||||
|
||||
/// lookUpRegForValue - Look up the value to see if its value is already
|
||||
/// cached in a register. It may be defined by instructions across blocks or
|
||||
/// defined locally.
|
||||
unsigned lookUpRegForValue(const Value *V);
|
||||
|
||||
/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
|
||||
/// takes care of truncating or sign-extending the given getelementptr
|
||||
/// index value.
|
||||
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
|
||||
|
||||
/// \brief We're checking to see if we can fold \p LI into \p FoldInst.
|
||||
/// Note that we could have a sequence where multiple LLVM IR instructions
|
||||
/// are folded into the same machineinstr. For example we could have:
|
||||
/// A: x = load i32 *P
|
||||
/// B: y = icmp A, 42
|
||||
/// C: br y, ...
|
||||
///
|
||||
/// In this scenario, \p LI is "A", and \p FoldInst is "C". We know
|
||||
/// about "B" (and any other folded instructions) because it is between
|
||||
/// A and C.
|
||||
///
|
||||
/// If we succeed folding, return true.
|
||||
///
|
||||
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
|
||||
|
||||
/// \brief The specified machine instr operand is a vreg, and that
|
||||
/// vreg is being provided by the specified load instruction. If possible,
|
||||
/// try to fold the load as an operand to the instruction, returning true if
|
||||
/// possible.
|
||||
/// This method should be implemented by targets.
|
||||
virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
|
||||
const LoadInst * /*LI*/) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
|
||||
/// into the current block.
|
||||
void recomputeInsertPt();
|
||||
|
||||
/// removeDeadCode - Remove all dead instructions between I and E.
|
||||
void removeDeadCode(MachineBasicBlock::iterator I,
|
||||
MachineBasicBlock::iterator E);
|
||||
|
||||
struct SavePoint {
|
||||
MachineBasicBlock::iterator InsertPt;
|
||||
DebugLoc DL;
|
||||
};
|
||||
|
||||
/// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
|
||||
/// into the local value area and return the old insert position.
|
||||
SavePoint enterLocalValueArea();
|
||||
|
||||
/// leaveLocalValueArea - Reset InsertPt to the given old insert position.
|
||||
void leaveLocalValueArea(SavePoint Old);
|
||||
|
||||
virtual ~FastISel();
|
||||
|
||||
protected:
|
||||
explicit FastISel(FunctionLoweringInfo &funcInfo,
|
||||
const TargetLibraryInfo *libInfo);
|
||||
|
||||
/// TargetSelectInstruction - This method is called by target-independent
|
||||
/// code when the normal FastISel process fails to select an instruction.
|
||||
/// This gives targets a chance to emit code for anything that doesn't
|
||||
/// fit into FastISel's framework. It returns true if it was successful.
|
||||
///
|
||||
virtual bool
|
||||
TargetSelectInstruction(const Instruction *I) = 0;
|
||||
|
||||
/// FastLowerArguments - This method is called by target-independent code to
|
||||
/// do target specific argument lowering. It returns true if it was
|
||||
/// successful.
|
||||
virtual bool FastLowerArguments();
|
||||
|
||||
/// FastEmit_ - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type and opcode
|
||||
/// be emitted.
|
||||
virtual unsigned FastEmit_(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode);
|
||||
|
||||
/// FastEmit_r - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// register operand be emitted.
|
||||
///
|
||||
virtual unsigned FastEmit_r(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
unsigned Op0, bool Op0IsKill);
|
||||
|
||||
/// FastEmit_rr - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// register operands be emitted.
|
||||
///
|
||||
virtual unsigned FastEmit_rr(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill);
|
||||
|
||||
/// FastEmit_ri - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// register and immediate operands be emitted.
|
||||
///
|
||||
virtual unsigned FastEmit_ri(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm);
|
||||
|
||||
/// FastEmit_rf - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// register and floating-point immediate operands be emitted.
|
||||
///
|
||||
virtual unsigned FastEmit_rf(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
const ConstantFP *FPImm);
|
||||
|
||||
/// FastEmit_rri - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// register and immediate operands be emitted.
|
||||
///
|
||||
virtual unsigned FastEmit_rri(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill,
|
||||
uint64_t Imm);
|
||||
|
||||
/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
|
||||
/// to emit an instruction with an immediate operand using FastEmit_ri.
|
||||
/// If that fails, it materializes the immediate into a register and tries
|
||||
/// FastEmit_rr instead.
|
||||
unsigned FastEmit_ri_(MVT VT,
|
||||
unsigned Opcode,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm, MVT ImmType);
|
||||
|
||||
/// FastEmit_i - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// immediate operand be emitted.
|
||||
virtual unsigned FastEmit_i(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
uint64_t Imm);
|
||||
|
||||
/// FastEmit_f - This method is called by target-independent code
|
||||
/// to request that an instruction with the given type, opcode, and
|
||||
/// floating-point immediate operand be emitted.
|
||||
virtual unsigned FastEmit_f(MVT VT,
|
||||
MVT RetVT,
|
||||
unsigned Opcode,
|
||||
const ConstantFP *FPImm);
|
||||
|
||||
/// FastEmitInst_ - Emit a MachineInstr with no operands and a
|
||||
/// result register in the given register class.
|
||||
///
|
||||
unsigned FastEmitInst_(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC);
|
||||
|
||||
/// FastEmitInst_r - Emit a MachineInstr with one register operand
|
||||
/// and a result register in the given register class.
|
||||
///
|
||||
unsigned FastEmitInst_r(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill);
|
||||
|
||||
/// FastEmitInst_rr - Emit a MachineInstr with two register operands
|
||||
/// and a result register in the given register class.
|
||||
///
|
||||
unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill);
|
||||
|
||||
/// FastEmitInst_rrr - Emit a MachineInstr with three register operands
|
||||
/// and a result register in the given register class.
|
||||
///
|
||||
unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill,
|
||||
unsigned Op2, bool Op2IsKill);
|
||||
|
||||
/// FastEmitInst_ri - Emit a MachineInstr with a register operand,
|
||||
/// an immediate, and a result register in the given register class.
|
||||
///
|
||||
unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm);
|
||||
|
||||
/// FastEmitInst_rii - Emit a MachineInstr with one register operand
|
||||
/// and two immediate operands.
|
||||
///
|
||||
unsigned FastEmitInst_rii(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm1, uint64_t Imm2);
|
||||
|
||||
/// FastEmitInst_rf - Emit a MachineInstr with a register operand, a
/// floating-point immediate operand, and a result register in the given
/// register class.
|
||||
///
|
||||
unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
const ConstantFP *FPImm);
|
||||
|
||||
/// FastEmitInst_rri - Emit a MachineInstr with two register operands,
|
||||
/// an immediate, and a result register in the given register class.
|
||||
///
|
||||
unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill,
|
||||
uint64_t Imm);
|
||||
|
||||
/// FastEmitInst_rrii - Emit a MachineInstr with two register operands,
|
||||
/// two immediate operands, and a result register in the given register
|
||||
/// class.
|
||||
unsigned FastEmitInst_rrii(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill,
|
||||
uint64_t Imm1, uint64_t Imm2);
|
||||
|
||||
/// FastEmitInst_i - Emit a MachineInstr with a single immediate
|
||||
/// operand, and a result register in the given register class.
|
||||
unsigned FastEmitInst_i(unsigned MachineInstrOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
uint64_t Imm);
|
||||
|
||||
/// FastEmitInst_ii - Emit a MachineInstr with two immediate operands.
|
||||
unsigned FastEmitInst_ii(unsigned MachineInstrOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
uint64_t Imm1, uint64_t Imm2);
|
||||
|
||||
/// FastEmitInst_extractsubreg - Emit a MachineInstr for an extract_subreg
|
||||
/// from a specified index of a superregister to a specified type.
|
||||
unsigned FastEmitInst_extractsubreg(MVT RetVT,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint32_t Idx);
|
||||
|
||||
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
|
||||
/// with all but the least significant bit set to zero.
|
||||
unsigned FastEmitZExtFromI1(MVT VT,
|
||||
unsigned Op0, bool Op0IsKill);
|
||||
|
||||
/// FastEmitBranch - Emit an unconditional branch to the given block,
|
||||
/// unless it is the immediate (fall-through) successor, and update
|
||||
/// the CFG.
|
||||
void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
|
||||
|
||||
void UpdateValueMap(const Value* I, unsigned Reg, unsigned NumRegs = 1);
|
||||
|
||||
unsigned createResultReg(const TargetRegisterClass *RC);
|
||||
|
||||
/// TargetMaterializeConstant - Emit a constant in a register using
|
||||
/// target-specific logic, such as constant pool loads.
|
||||
virtual unsigned TargetMaterializeConstant(const Constant* C) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// TargetMaterializeAlloca - Emit an alloca address in a register using
|
||||
/// target-specific logic.
|
||||
virtual unsigned TargetMaterializeAlloca(const AllocaInst* C) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual unsigned TargetMaterializeFloatZero(const ConstantFP* CF) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
private:
|
||||
bool SelectBinaryOp(const User *I, unsigned ISDOpcode);
|
||||
|
||||
bool SelectFNeg(const User *I);
|
||||
|
||||
bool SelectGetElementPtr(const User *I);
|
||||
|
||||
bool SelectCall(const User *I);
|
||||
|
||||
bool SelectBitCast(const User *I);
|
||||
|
||||
bool SelectCast(const User *I, unsigned Opcode);
|
||||
|
||||
bool SelectExtractValue(const User *I);
|
||||
|
||||
bool SelectInsertValue(const User *I);
|
||||
|
||||
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
|
||||
/// Emit code to ensure constants are copied into registers when needed.
|
||||
/// Remember the virtual registers that need to be added to the Machine PHI
|
||||
/// nodes as input. We cannot just directly add them, because expansion
|
||||
/// might result in multiple MBB's for one BB. As such, the start of the
|
||||
/// BB might correspond to a different MBB than the end.
|
||||
bool HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
|
||||
|
||||
/// materializeRegForValue - Helper for getRegForValue. This function is
|
||||
/// called when the value isn't already available in a register and must
|
||||
/// be materialized with new instructions.
|
||||
unsigned materializeRegForValue(const Value *V, MVT VT);
|
||||
|
||||
/// flushLocalValueMap - clears LocalValueMap and moves the area for the
|
||||
/// new local variables to the beginning of the block. It helps to avoid
|
||||
/// spilling cached variables across heavy instructions like calls.
|
||||
void flushLocalValueMap();
|
||||
|
||||
/// hasTrivialKill - Test whether the given value has exactly one use.
|
||||
bool hasTrivialKill(const Value *V) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
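As a rough illustration of the protected hooks above, a backend's fast instruction selector derives from FastISel and implements TargetSelectInstruction for whatever the generic path cannot handle. This is a minimal sketch under the assumption of a hypothetical target (MyFastISel is not a real class); real backends additionally provide a factory function for SelectionDAGISel to call.

#include "llvm/CodeGen/FastISel.h"
#include "llvm/IR/Instructions.h"

namespace {
// Hypothetical target fast-isel.
class MyFastISel : public llvm::FastISel {
public:
  MyFastISel(llvm::FunctionLoweringInfo &FuncInfo,
             const llvm::TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo) {}

  // Called when the target-independent path gives up on an instruction.
  virtual bool TargetSelectInstruction(const llvm::Instruction *I) {
    switch (I->getOpcode()) {
    default:
      return false;                       // punt back to SelectionDAG
    case llvm::Instruction::Ret:
      // ... emit the target's return sequence with FastEmitInst_* here ...
      return true;
    }
  }
};
} // anonymous namespace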
228
thirdparty/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
//===-- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen --===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This implements routines for translating functions from LLVM IR into
|
||||
// Machine IR.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
|
||||
#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
|
||||
|
||||
#include "llvm/ADT/APInt.h"
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/IndexedMap.h"
|
||||
#include "llvm/ADT/SmallPtrSet.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/ValueTypes.h"
|
||||
#include "llvm/IR/InlineAsm.h"
|
||||
#include "llvm/IR/Instructions.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class AllocaInst;
|
||||
class BasicBlock;
|
||||
class BranchProbabilityInfo;
|
||||
class CallInst;
|
||||
class Function;
|
||||
class GlobalVariable;
|
||||
class Instruction;
|
||||
class MachineInstr;
|
||||
class MachineBasicBlock;
|
||||
class MachineFunction;
|
||||
class MachineModuleInfo;
|
||||
class MachineRegisterInfo;
|
||||
class TargetLowering;
|
||||
class Value;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
/// FunctionLoweringInfo - This contains information that is global to a
|
||||
/// function that is used when lowering a region of the function.
|
||||
///
|
||||
class FunctionLoweringInfo {
|
||||
public:
|
||||
const TargetLowering &TLI;
|
||||
const Function *Fn;
|
||||
MachineFunction *MF;
|
||||
MachineRegisterInfo *RegInfo;
|
||||
BranchProbabilityInfo *BPI;
|
||||
/// CanLowerReturn - true iff the function's return value can be lowered to
|
||||
/// registers.
|
||||
bool CanLowerReturn;
|
||||
|
||||
/// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
|
||||
/// allocated to hold a pointer to the hidden sret parameter.
|
||||
unsigned DemoteRegister;
|
||||
|
||||
/// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
|
||||
DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
|
||||
|
||||
/// ValueMap - Since we emit code for the function a basic block at a time,
|
||||
/// we must remember which virtual registers hold the values for
|
||||
/// cross-basic-block values.
|
||||
DenseMap<const Value*, unsigned> ValueMap;
|
||||
|
||||
/// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
|
||||
/// the entry block. This allows the allocas to be efficiently referenced
|
||||
/// anywhere in the function.
|
||||
DenseMap<const AllocaInst*, int> StaticAllocaMap;
|
||||
|
||||
/// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
|
||||
DenseMap<const Argument*, int> ByValArgFrameIndexMap;
|
||||
|
||||
/// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
|
||||
/// function arguments that are inserted after scheduling is completed.
|
||||
SmallVector<MachineInstr*, 8> ArgDbgValues;
|
||||
|
||||
/// RegFixups - Registers which need to be replaced after isel is done.
|
||||
DenseMap<unsigned, unsigned> RegFixups;
|
||||
|
||||
/// MBB - The current block.
|
||||
MachineBasicBlock *MBB;
|
||||
|
||||
/// InsertPt - The current insert position inside the current block.
|
||||
MachineBasicBlock::iterator InsertPt;
|
||||
|
||||
#ifndef NDEBUG
|
||||
SmallPtrSet<const Instruction *, 8> CatchInfoLost;
|
||||
SmallPtrSet<const Instruction *, 8> CatchInfoFound;
|
||||
#endif
|
||||
|
||||
struct LiveOutInfo {
|
||||
unsigned NumSignBits : 31;
|
||||
bool IsValid : 1;
|
||||
APInt KnownOne, KnownZero;
|
||||
LiveOutInfo() : NumSignBits(0), IsValid(true), KnownOne(1, 0),
|
||||
KnownZero(1, 0) {}
|
||||
};
|
||||
|
||||
/// VisitedBBs - The set of basic blocks visited thus far by instruction
|
||||
/// selection.
|
||||
SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
|
||||
|
||||
/// PHINodesToUpdate - A list of phi instructions whose operand list will
|
||||
/// be updated after processing the current basic block.
|
||||
/// TODO: This isn't per-function state, it's per-basic-block state. But
|
||||
/// there's no other convenient place for it to live right now.
|
||||
std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
|
||||
|
||||
explicit FunctionLoweringInfo(const TargetLowering &TLI);
|
||||
|
||||
/// set - Initialize this FunctionLoweringInfo with the given Function
|
||||
/// and its associated MachineFunction.
|
||||
///
|
||||
void set(const Function &Fn, MachineFunction &MF);
|
||||
|
||||
/// clear - Clear out all the function-specific state. This returns this
|
||||
/// FunctionLoweringInfo to an empty state, ready to be used for a
|
||||
/// different function.
|
||||
void clear();
|
||||
|
||||
/// isExportedInst - Return true if the specified value is an instruction
|
||||
/// exported from its block.
|
||||
bool isExportedInst(const Value *V) {
|
||||
return ValueMap.count(V);
|
||||
}
|
||||
|
||||
unsigned CreateReg(MVT VT);
|
||||
|
||||
unsigned CreateRegs(Type *Ty);
|
||||
|
||||
unsigned InitializeRegForValue(const Value *V) {
|
||||
unsigned &R = ValueMap[V];
|
||||
assert(R == 0 && "Already initialized this value register!");
|
||||
return R = CreateRegs(V->getType());
|
||||
}
|
||||
|
||||
/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
|
||||
/// register is a PHI destination and the PHI's LiveOutInfo is not valid.
|
||||
const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg) {
|
||||
if (!LiveOutRegInfo.inBounds(Reg))
|
||||
return NULL;
|
||||
|
||||
const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
|
||||
if (!LOI->IsValid)
|
||||
return NULL;
|
||||
|
||||
return LOI;
|
||||
}
|
||||
|
||||
/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
|
||||
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
|
||||
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
|
||||
/// the larger bit width by zero extension. The bit width must be no smaller
|
||||
/// than the LiveOutInfo's existing bit width.
|
||||
const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth);
|
||||
|
||||
/// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
|
||||
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
|
||||
const APInt &KnownZero, const APInt &KnownOne) {
|
||||
// Only install this information if it tells us something.
|
||||
if (NumSignBits == 1 && KnownZero == 0 && KnownOne == 0)
|
||||
return;
|
||||
|
||||
LiveOutRegInfo.grow(Reg);
|
||||
LiveOutInfo &LOI = LiveOutRegInfo[Reg];
|
||||
LOI.NumSignBits = NumSignBits;
|
||||
LOI.KnownOne = KnownOne;
|
||||
LOI.KnownZero = KnownZero;
|
||||
}
|
||||
|
||||
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
|
||||
/// register based on the LiveOutInfo of its operands.
|
||||
void ComputePHILiveOutRegInfo(const PHINode*);
|
||||
|
||||
/// InvalidatePHILiveOutRegInfo - Invalidates a PHI's LiveOutInfo, to be
|
||||
/// called when a block is visited before all of its predecessors.
|
||||
void InvalidatePHILiveOutRegInfo(const PHINode *PN) {
|
||||
// PHIs with no uses have no ValueMap entry.
|
||||
DenseMap<const Value*, unsigned>::const_iterator It = ValueMap.find(PN);
|
||||
if (It == ValueMap.end())
|
||||
return;
|
||||
|
||||
unsigned Reg = It->second;
|
||||
LiveOutRegInfo.grow(Reg);
|
||||
LiveOutRegInfo[Reg].IsValid = false;
|
||||
}
|
||||
|
||||
/// setArgumentFrameIndex - Record frame index for the byval
|
||||
/// argument.
|
||||
void setArgumentFrameIndex(const Argument *A, int FI);
|
||||
|
||||
/// getArgumentFrameIndex - Get frame index for the byval argument.
|
||||
int getArgumentFrameIndex(const Argument *A);
|
||||
|
||||
private:
|
||||
/// LiveOutRegInfo - Information about live out vregs.
|
||||
IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
|
||||
};
|
||||
|
||||
/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
|
||||
/// being passed to this variadic function, and set the MachineModuleInfo's
|
||||
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
|
||||
/// reference to _fltused on Windows, which will link in MSVCRT's
|
||||
/// floating-point support.
|
||||
void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI);
|
||||
|
||||
/// AddCatchInfo - Extract the personality and type infos from an eh.selector
|
||||
/// call, and add them to the specified machine basic block.
|
||||
void AddCatchInfo(const CallInst &I,
|
||||
MachineModuleInfo *MMI, MachineBasicBlock *MBB);
|
||||
|
||||
/// AddLandingPadInfo - Extract the exception handling information from the
|
||||
/// landingpad instruction and add them to the specified machine module info.
|
||||
void AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
|
||||
MachineBasicBlock *MBB);
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
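For illustration only: the ValueMap/InitializeRegForValue pair above is how cross-basic-block values receive their virtual registers during selection. The helper below is hypothetical and simply spells out the look-up-or-create pattern a selector follows.

#include "llvm/CodeGen/FunctionLoweringInfo.h"

// Hypothetical helper: return the vreg carrying V across blocks, creating it
// (and registering it in ValueMap) on first use.
static unsigned getCrossBlockVReg(llvm::FunctionLoweringInfo &FuncInfo,
                                  const llvm::Value *V) {
  llvm::DenseMap<const llvm::Value*, unsigned>::iterator It =
      FuncInfo.ValueMap.find(V);
  if (It != FuncInfo.ValueMap.end())
    return It->second;                      // already exported from its block
  return FuncInfo.InitializeRegForValue(V); // creates regs for V's type
}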
200
thirdparty/clang/include/llvm/CodeGen/GCMetadata.h
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
//===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
|
||||
// used as a communication channel from the target code generator to the target
|
||||
// garbage collectors. This interface allows code generators and garbage
|
||||
// collectors to be developed independently.
|
||||
//
|
||||
// The GCFunctionInfo class logs the data necessary to build a type accurate
|
||||
// stack map. The code generator outputs:
|
||||
//
|
||||
// - Safe points as specified by the GCStrategy's NeededSafePoints.
|
||||
// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
|
||||
//
|
||||
// As a refinement, liveness analysis calculates the set of live roots at each
|
||||
// safe point. Liveness analysis is not presently performed by the code
|
||||
// generator, so all roots are assumed live.
|
||||
//
|
||||
// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
|
||||
// they are compiled. This accretion is necessary for collectors which must emit
|
||||
// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
|
||||
// outlives the MachineFunction from which it is derived and must not refer to
|
||||
// any code generator data structures.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_GCMETADATA_H
|
||||
#define LLVM_CODEGEN_GCMETADATA_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/StringMap.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include "llvm/Support/DebugLoc.h"
|
||||
|
||||
namespace llvm {
|
||||
class AsmPrinter;
|
||||
class GCStrategy;
|
||||
class Constant;
|
||||
class MCSymbol;
|
||||
|
||||
namespace GC {
|
||||
/// PointKind - The type of a collector-safe point.
|
||||
///
|
||||
enum PointKind {
|
||||
Loop, ///< Instr is a loop (backwards branch).
|
||||
Return, ///< Instr is a return instruction.
|
||||
PreCall, ///< Instr is a call instruction.
|
||||
PostCall ///< Instr is the return address of a call.
|
||||
};
|
||||
}
|
||||
|
||||
/// GCPoint - Metadata for a collector-safe point in machine code.
|
||||
///
|
||||
struct GCPoint {
|
||||
GC::PointKind Kind; ///< The kind of the safe point.
|
||||
MCSymbol *Label; ///< A label.
|
||||
DebugLoc Loc;
|
||||
|
||||
GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
|
||||
: Kind(K), Label(L), Loc(DL) {}
|
||||
};
|
||||
|
||||
/// GCRoot - Metadata for a pointer to an object managed by the garbage
|
||||
/// collector.
|
||||
struct GCRoot {
|
||||
int Num; ///< Usually a frame index.
|
||||
int StackOffset; ///< Offset from the stack pointer.
|
||||
const Constant *Metadata; ///< Metadata straight from the call
|
||||
///< to llvm.gcroot.
|
||||
|
||||
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
|
||||
};
|
||||
|
||||
|
||||
/// GCFunctionInfo - Garbage collection metadata for a single function.
|
||||
///
|
||||
class GCFunctionInfo {
|
||||
public:
|
||||
typedef std::vector<GCPoint>::iterator iterator;
|
||||
typedef std::vector<GCRoot>::iterator roots_iterator;
|
||||
typedef std::vector<GCRoot>::const_iterator live_iterator;
|
||||
|
||||
private:
|
||||
const Function &F;
|
||||
GCStrategy &S;
|
||||
uint64_t FrameSize;
|
||||
std::vector<GCRoot> Roots;
|
||||
std::vector<GCPoint> SafePoints;
|
||||
|
||||
// FIXME: Liveness. A 2D BitVector, perhaps?
|
||||
//
|
||||
// BitVector Liveness;
|
||||
//
|
||||
// bool islive(int point, int root) =
|
||||
// Liveness[point * SafePoints.size() + root]
|
||||
//
|
||||
// The bit vector is the more compact representation where >3.2% of roots
|
||||
// are live per safe point (1.5% on 64-bit hosts).
|
||||
|
||||
public:
|
||||
GCFunctionInfo(const Function &F, GCStrategy &S);
|
||||
~GCFunctionInfo();
|
||||
|
||||
/// getFunction - Return the function to which this metadata applies.
|
||||
///
|
||||
const Function &getFunction() const { return F; }
|
||||
|
||||
/// getStrategy - Return the GC strategy for the function.
|
||||
///
|
||||
GCStrategy &getStrategy() { return S; }
|
||||
|
||||
/// addStackRoot - Registers a root that lives on the stack. Num is the
|
||||
/// stack object ID for the alloca (if the code generator is
|
||||
/// using MachineFrameInfo).
|
||||
void addStackRoot(int Num, const Constant *Metadata) {
|
||||
Roots.push_back(GCRoot(Num, Metadata));
|
||||
}
|
||||
|
||||
/// removeStackRoot - Removes a root.
|
||||
roots_iterator removeStackRoot(roots_iterator position) {
|
||||
return Roots.erase(position);
|
||||
}
|
||||
|
||||
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
|
||||
/// label just prior to the safe point (if the code generator is using
|
||||
/// MachineModuleInfo).
|
||||
void addSafePoint(GC::PointKind Kind, MCSymbol *Label, DebugLoc DL) {
|
||||
SafePoints.push_back(GCPoint(Kind, Label, DL));
|
||||
}
|
||||
|
||||
/// getFrameSize/setFrameSize - Records the function's frame size.
|
||||
///
|
||||
uint64_t getFrameSize() const { return FrameSize; }
|
||||
void setFrameSize(uint64_t S) { FrameSize = S; }
|
||||
|
||||
/// begin/end - Iterators for safe points.
|
||||
///
|
||||
iterator begin() { return SafePoints.begin(); }
|
||||
iterator end() { return SafePoints.end(); }
|
||||
size_t size() const { return SafePoints.size(); }
|
||||
|
||||
/// roots_begin/roots_end - Iterators for all roots in the function.
|
||||
///
|
||||
roots_iterator roots_begin() { return Roots.begin(); }
|
||||
roots_iterator roots_end () { return Roots.end(); }
|
||||
size_t roots_size() const { return Roots.size(); }
|
||||
|
||||
/// live_begin/live_end - Iterators for live roots at a given safe point.
|
||||
///
|
||||
live_iterator live_begin(const iterator &p) { return roots_begin(); }
|
||||
live_iterator live_end (const iterator &p) { return roots_end(); }
|
||||
size_t live_size(const iterator &p) const { return roots_size(); }
|
||||
};
|
||||
|
||||
|
||||
/// GCModuleInfo - Garbage collection metadata for a whole module.
|
||||
///
|
||||
class GCModuleInfo : public ImmutablePass {
|
||||
typedef StringMap<GCStrategy*> strategy_map_type;
|
||||
typedef std::vector<GCStrategy*> list_type;
|
||||
typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
|
||||
|
||||
strategy_map_type StrategyMap;
|
||||
list_type StrategyList;
|
||||
finfo_map_type FInfoMap;
|
||||
|
||||
GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name);
|
||||
|
||||
public:
|
||||
typedef list_type::const_iterator iterator;
|
||||
|
||||
static char ID;
|
||||
|
||||
GCModuleInfo();
|
||||
~GCModuleInfo();
|
||||
|
||||
/// clear - Resets the pass. Any pass which uses GCModuleInfo should
|
||||
/// call it in doFinalization().
|
||||
///
|
||||
void clear();
|
||||
|
||||
/// begin/end - Iterators for used strategies.
|
||||
///
|
||||
iterator begin() const { return StrategyList.begin(); }
|
||||
iterator end() const { return StrategyList.end(); }
|
||||
|
||||
/// get - Look up function metadata.
|
||||
///
|
||||
GCFunctionInfo &getFunctionInfo(const Function &F);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
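The sketch below (a hypothetical dump helper, not part of this header) walks the per-function metadata the way a stack-map emitter would. Because liveness analysis is not run, live_begin/live_end simply cover every root at every safe point, as noted in the file comment.

#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print one line per live root per safe point.
static void dumpStackMap(llvm::GCFunctionInfo &FI) {
  llvm::errs() << FI.getFunction().getName()
               << " frame size " << FI.getFrameSize() << "\n";
  for (llvm::GCFunctionInfo::iterator PI = FI.begin(), PE = FI.end();
       PI != PE; ++PI)
    for (llvm::GCFunctionInfo::live_iterator LI = FI.live_begin(PI),
                                             LE = FI.live_end(PI);
         LI != LE; ++LI)
      llvm::errs() << "  root at stack offset " << LI->StackOffset << "\n";
}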
74
thirdparty/clang/include/llvm/CodeGen/GCMetadataPrinter.h
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
//===-- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables -*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// The abstract base class GCMetadataPrinter supports writing GC metadata tables
|
||||
// as assembly code. This is a separate class from GCStrategy in order to allow
|
||||
// users of the LLVM JIT to avoid linking with the AsmWriter.
|
||||
//
|
||||
// Subclasses of GCMetadataPrinter must be registered using the
|
||||
// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
|
||||
// because these subclasses are logically plugins for the AsmWriter.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
|
||||
#define LLVM_CODEGEN_GCMETADATAPRINTER_H
|
||||
|
||||
#include "llvm/CodeGen/GCMetadata.h"
|
||||
#include "llvm/CodeGen/GCStrategy.h"
|
||||
#include "llvm/Support/Registry.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class GCMetadataPrinter;
|
||||
|
||||
/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
|
||||
/// defaults from Registry.
|
||||
typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
|
||||
|
||||
/// GCMetadataPrinter - Emits GC metadata as assembly code.
|
||||
///
|
||||
class GCMetadataPrinter {
|
||||
public:
|
||||
typedef GCStrategy::list_type list_type;
|
||||
typedef GCStrategy::iterator iterator;
|
||||
|
||||
private:
|
||||
GCStrategy *S;
|
||||
|
||||
friend class AsmPrinter;
|
||||
|
||||
protected:
|
||||
// May only be subclassed.
|
||||
GCMetadataPrinter();
|
||||
|
||||
private:
|
||||
GCMetadataPrinter(const GCMetadataPrinter &) LLVM_DELETED_FUNCTION;
|
||||
GCMetadataPrinter &
|
||||
operator=(const GCMetadataPrinter &) LLVM_DELETED_FUNCTION;
|
||||
|
||||
public:
|
||||
GCStrategy &getStrategy() { return *S; }
|
||||
const Module &getModule() const { return S->getModule(); }
|
||||
|
||||
/// begin/end - Iterate over the collected function metadata.
|
||||
iterator begin() { return S->begin(); }
|
||||
iterator end() { return S->end(); }
|
||||
|
||||
/// beginAssembly/finishAssembly - Emit module metadata as assembly code.
|
||||
virtual void beginAssembly(AsmPrinter &AP);
|
||||
|
||||
virtual void finishAssembly(AsmPrinter &AP);
|
||||
|
||||
virtual ~GCMetadataPrinter();
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
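A sketch of a printer registration, under the assumption of a hypothetical collector named "mygc" (neither the class nor the registry entry below exists in LLVM): the AsmPrinter looks the printer up by the strategy's name and calls the two assembly hooks around code emission.

#include "llvm/CodeGen/GCMetadataPrinter.h"

namespace {
// Hypothetical printer; the registry name must match a GCStrategy name.
class MyGCPrinter : public llvm::GCMetadataPrinter {
public:
  virtual void finishAssembly(llvm::AsmPrinter &AP) {
    // Walk the function metadata collected by the strategy and emit the
    // runtime's tables, e.g. through AP.OutStreamer.
    for (iterator I = begin(), E = end(); I != E; ++I) {
      llvm::GCFunctionInfo &FI = **I;
      (void)FI; // ... emit safe points and roots for FI here ...
    }
  }
};
} // anonymous namespace

// Makes the printer selectable for functions whose GC strategy is "mygc".
static llvm::GCMetadataPrinterRegistry::Add<MyGCPrinter>
    X("mygc", "hypothetical example collector");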
153
thirdparty/clang/include/llvm/CodeGen/GCStrategy.h
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
//===-- llvm/CodeGen/GCStrategy.h - Garbage collection ----------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// GCStrategy coordinates code generation algorithms and implements some itself
|
||||
// in order to generate code compatible with a target code generator as
|
||||
// specified in a function's 'gc' attribute. Algorithms are enabled by setting
|
||||
// flags in a subclass's constructor, and some virtual methods can be
|
||||
// overridden.
|
||||
//
|
||||
// When requested, the GCStrategy will be populated with data about each
|
||||
// function which uses it. Specifically:
|
||||
//
|
||||
// - Safe points
|
||||
// Garbage collection is generally only possible at certain points in code.
|
||||
// GCStrategy can request that the collector insert such points:
|
||||
//
|
||||
// - At and after any call to a subroutine
|
||||
// - Before returning from the current function
|
||||
// - Before backwards branches (loops)
|
||||
//
|
||||
// - Roots
|
||||
// When a reference to a GC-allocated object exists on the stack, it must be
|
||||
// stored in an alloca registered with llvm.gcroot.
|
||||
//
|
||||
// This information can be used to emit the metadata tables which are required by
|
||||
// the target garbage collector runtime.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_GCSTRATEGY_H
|
||||
#define LLVM_CODEGEN_GCSTRATEGY_H
|
||||
|
||||
#include "llvm/CodeGen/GCMetadata.h"
|
||||
#include "llvm/CodeGen/MachineFunction.h"
|
||||
#include "llvm/Support/Registry.h"
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class GCStrategy;
|
||||
|
||||
/// The GC strategy registry uses all the defaults from Registry.
|
||||
///
|
||||
typedef Registry<GCStrategy> GCRegistry;
|
||||
|
||||
/// GCStrategy describes a garbage collector algorithm's code generation
|
||||
/// requirements, and provides overridable hooks for those needs which cannot
|
||||
/// be abstractly described.
|
||||
class GCStrategy {
|
||||
public:
|
||||
typedef std::vector<GCFunctionInfo*> list_type;
|
||||
typedef list_type::iterator iterator;
|
||||
|
||||
private:
|
||||
friend class GCModuleInfo;
|
||||
const Module *M;
|
||||
std::string Name;
|
||||
|
||||
list_type Functions;
|
||||
|
||||
protected:
|
||||
unsigned NeededSafePoints; ///< Bitmask of required safe points.
|
||||
bool CustomReadBarriers; ///< Default is to insert loads.
|
||||
bool CustomWriteBarriers; ///< Default is to insert stores.
|
||||
bool CustomRoots; ///< Default is to pass through to backend.
|
||||
bool CustomSafePoints; ///< Default is to use NeededSafePoints
|
||||
///< to find safe points.
|
||||
bool InitRoots; ///< If set, roots are nulled during lowering.
|
||||
bool UsesMetadata; ///< If set, backend must emit metadata tables.
|
||||
|
||||
public:
|
||||
GCStrategy();
|
||||
|
||||
virtual ~GCStrategy();
|
||||
|
||||
|
||||
/// getName - The name of the GC strategy, for debugging.
|
||||
///
|
||||
const std::string &getName() const { return Name; }
|
||||
|
||||
/// getModule - The module within which the GC strategy is operating.
|
||||
///
|
||||
const Module &getModule() const { return *M; }
|
||||
|
||||
/// needsSafePoints - True if safe points of any kind are required. By
/// default, none are recorded.
|
||||
bool needsSafePoints() const {
|
||||
return CustomSafePoints || NeededSafePoints != 0;
|
||||
}
|
||||
|
||||
/// needsSafePoint(Kind) - True if the given kind of safe point is
/// required. By default, none are recorded.
|
||||
bool needsSafePoint(GC::PointKind Kind) const {
|
||||
return (NeededSafePoints & 1 << Kind) != 0;
|
||||
}
|
||||
|
||||
/// customWriteBarrier - By default, write barriers are replaced with simple
|
||||
/// store instructions. If true, then
|
||||
/// performCustomLowering must instead lower them.
|
||||
bool customWriteBarrier() const { return CustomWriteBarriers; }
|
||||
|
||||
/// customReadBarrier - By default, read barriers are replaced with simple
|
||||
/// load instructions. If true, then
|
||||
/// performCustomLowering must instead lower them.
|
||||
bool customReadBarrier() const { return CustomReadBarriers; }
|
||||
|
||||
/// customRoots - By default, roots are left for the code generator so it
|
||||
/// can generate a stack map. If true, then
|
||||
/// performCustomLowering must delete them.
|
||||
bool customRoots() const { return CustomRoots; }
|
||||
|
||||
/// customSafePoints - By default, the GC analysis will find safe
|
||||
/// points according to NeededSafePoints. If true,
|
||||
/// then findCustomSafePoints must create them.
|
||||
bool customSafePoints() const { return CustomSafePoints; }
|
||||
|
||||
/// initializeRoots - If set, gcroot intrinsics should initialize their
|
||||
/// allocas to null before the first use. This is
/// necessary for most GCs and is enabled by default.
|
||||
bool initializeRoots() const { return InitRoots; }
|
||||
|
||||
/// usesMetadata - If set, appropriate metadata tables must be emitted by
|
||||
/// the back-end (assembler, JIT, or otherwise).
|
||||
bool usesMetadata() const { return UsesMetadata; }
|
||||
|
||||
/// begin/end - Iterators for function metadata.
|
||||
///
|
||||
iterator begin() { return Functions.begin(); }
|
||||
iterator end() { return Functions.end(); }
|
||||
|
||||
/// insertFunctionInfo - Creates metadata for a function.
|
||||
///
|
||||
GCFunctionInfo *insertFunctionInfo(const Function &F);
|
||||
|
||||
/// initializeCustomLowering/performCustomLowering - If any of the actions
|
||||
/// are set to custom, performCustomLowering must be overridden to transform
|
||||
/// the corresponding actions to LLVM IR. initializeCustomLowering is
|
||||
/// optional to override. These are the only GCStrategy methods through
|
||||
/// which the LLVM IR can be modified.
|
||||
virtual bool initializeCustomLowering(Module &F);
|
||||
virtual bool performCustomLowering(Function &F);
|
||||
virtual bool findCustomSafePoints(GCFunctionInfo& FI, MachineFunction& MF);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
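For illustration, a strategy that takes over root lowering might look like the sketch below; MyGCStrategy and the "mygc" registry entry are hypothetical, and the flag choices roughly mirror a shadow-stack style collector.

#include "llvm/CodeGen/GCStrategy.h"

namespace {
// Hypothetical strategy: all configuration happens in the constructor.
class MyGCStrategy : public llvm::GCStrategy {
public:
  MyGCStrategy() {
    InitRoots = true;       // null out gcroot allocas before first use
    CustomRoots = true;     // performCustomLowering rewrites llvm.gcroot
    UsesMetadata = false;   // no assembly tables required
  }

  virtual bool performCustomLowering(llvm::Function &F) {
    // ... rewrite gcroot/gcread/gcwrite intrinsics in F here ...
    return false;           // return true only if F was actually modified
  }
};
} // anonymous namespace

// Functions carrying the attribute gc "mygc" pick up this strategy.
static llvm::GCRegistry::Add<MyGCStrategy>
    X("mygc", "hypothetical example collector");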
41
thirdparty/clang/include/llvm/CodeGen/GCs.h
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
//===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file contains hack functions to force linking in the GC components.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_GCS_H
|
||||
#define LLVM_CODEGEN_GCS_H
|
||||
|
||||
namespace llvm {
|
||||
class GCStrategy;
|
||||
class GCMetadataPrinter;
|
||||
|
||||
/// FIXME: Collector instances are not useful on their own. These no longer
|
||||
/// serve any purpose except to link in the plugins.
|
||||
|
||||
/// Creates an ocaml-compatible garbage collector.
|
||||
void linkOcamlGC();
|
||||
|
||||
/// Creates an ocaml-compatible metadata printer.
|
||||
void linkOcamlGCPrinter();
|
||||
|
||||
/// Creates an erlang-compatible garbage collector.
|
||||
void linkErlangGC();
|
||||
|
||||
/// Creates an erlang-compatible metadata printer.
|
||||
void linkErlangGCPrinter();
|
||||
|
||||
/// Creates a shadow stack garbage collector. This collector requires no code
|
||||
/// generator support.
|
||||
void linkShadowStackGC();
|
||||
}
|
||||
|
||||
#endif
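In practice a tool that statically links the code generator references these hooks once (for example early in main) purely so the linker keeps the plugin objects; the snippet below is a minimal, hypothetical illustration rather than a required setup sequence.

#include "llvm/CodeGen/GCs.h"

int main() {
  llvm::linkOcamlGC();         // keep the ocaml-compatible GC linked in
  llvm::linkShadowStackGC();   // keep the shadow-stack GC linked in
  // ... normal tool initialization and codegen would follow ...
  return 0;
}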
819
thirdparty/clang/include/llvm/CodeGen/ISDOpcodes.h
vendored
Normal file
@@ -0,0 +1,819 @@
|
||||
//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file declares codegen opcodes and related utilities.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_ISDOPCODES_H
|
||||
#define LLVM_CODEGEN_ISDOPCODES_H
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// ISD namespace - This namespace contains an enum which represents all of the
|
||||
/// SelectionDAG node types and value types.
|
||||
///
|
||||
namespace ISD {
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
/// ISD::NodeType enum - This enum defines the target-independent operators
|
||||
/// for a SelectionDAG.
|
||||
///
|
||||
/// Targets may also define target-dependent operator codes for SDNodes. For
|
||||
/// example, on x86, these are the enum values in the X86ISD namespace.
|
||||
/// Targets should aim to use target-independent operators to model their
|
||||
/// instruction sets as much as possible, and only use target-dependent
|
||||
/// operators when they have special requirements.
|
||||
///
|
||||
/// Finally, during and after selection proper, SDNodes may use special
|
||||
/// operator codes that correspond directly with MachineInstr opcodes. These
|
||||
/// are used to represent selected instructions. See the isMachineOpcode()
|
||||
/// and getMachineOpcode() member functions of SDNode.
|
||||
///
|
||||
enum NodeType {
|
||||
/// DELETED_NODE - This is an illegal value that is used to catch
|
||||
/// errors. This opcode is not a legal opcode for any node.
|
||||
DELETED_NODE,
|
||||
|
||||
/// EntryToken - This is the marker used to indicate the start of a region.
|
||||
EntryToken,
|
||||
|
||||
/// TokenFactor - This node takes multiple tokens as input and produces a
|
||||
/// single token result. This is used to represent the fact that the operand
|
||||
/// operators are independent of each other.
|
||||
TokenFactor,
|
||||
|
||||
/// AssertSext, AssertZext - These nodes record if a register contains a
|
||||
/// value that has already been zero or sign extended from a narrower type.
|
||||
/// These nodes take two operands. The first is the node that has already
|
||||
/// been extended, and the second is a value type node indicating the width
|
||||
/// of the extension
|
||||
AssertSext, AssertZext,
|
||||
|
||||
/// Various leaf nodes.
|
||||
BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
|
||||
Constant, ConstantFP,
|
||||
GlobalAddress, GlobalTLSAddress, FrameIndex,
|
||||
JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
|
||||
|
||||
/// The address of the GOT
|
||||
GLOBAL_OFFSET_TABLE,
|
||||
|
||||
/// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
|
||||
/// llvm.returnaddress on the DAG. These nodes take one operand, the index
|
||||
/// of the frame or return address to return. An index of zero corresponds
|
||||
/// to the current function's frame or return address, an index of one to
|
||||
/// the parent's frame or return address, and so on.
|
||||
FRAMEADDR, RETURNADDR,
|
||||
|
||||
/// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
|
||||
/// first (possible) on-stack argument. This is needed for correct stack
|
||||
/// adjustment during unwind.
|
||||
FRAME_TO_ARGS_OFFSET,
|
||||
|
||||
/// RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
|
||||
/// address of the exception block on entry to a landing pad block.
|
||||
EXCEPTIONADDR,
|
||||
|
||||
/// RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
|
||||
/// address of the Language Specific Data Area for the enclosing function.
|
||||
LSDAADDR,
|
||||
|
||||
/// RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node
|
||||
/// represents the selection index of the exception thrown.
|
||||
EHSELECTION,
|
||||
|
||||
/// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
|
||||
/// 'eh_return' gcc dwarf builtin, which is used to return from
|
||||
/// exception. The general meaning is: adjust stack by OFFSET and pass
|
||||
/// execution to HANDLER. Many platform-related details also :)
|
||||
EH_RETURN,
|
||||
|
||||
/// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
|
||||
/// This corresponds to the eh.sjlj.setjmp intrinsic.
|
||||
/// It takes an input chain and a pointer to the jump buffer as inputs
|
||||
/// and returns an outchain.
|
||||
EH_SJLJ_SETJMP,
|
||||
|
||||
/// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
|
||||
/// This corresponds to the eh.sjlj.longjmp intrinsic.
|
||||
/// It takes an input chain and a pointer to the jump buffer as inputs
|
||||
/// and returns an outchain.
|
||||
EH_SJLJ_LONGJMP,
|
||||
|
||||
/// TargetConstant* - Like Constant*, but the DAG does not do any folding,
|
||||
/// simplification, or lowering of the constant. They are used for constants
|
||||
/// which are known to fit in the immediate fields of their users, or for
|
||||
/// carrying magic numbers which are not values which need to be
|
||||
/// materialized in registers.
|
||||
TargetConstant,
|
||||
TargetConstantFP,
|
||||
|
||||
/// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
|
||||
/// anything else with this node, and this is valid in the target-specific
|
||||
/// dag, turning into a GlobalAddress operand.
|
||||
TargetGlobalAddress,
|
||||
TargetGlobalTLSAddress,
|
||||
TargetFrameIndex,
|
||||
TargetJumpTable,
|
||||
TargetConstantPool,
|
||||
TargetExternalSymbol,
|
||||
TargetBlockAddress,
|
||||
|
||||
/// TargetIndex - Like a constant pool entry, but with completely
|
||||
/// target-dependent semantics. Holds target flags, a 32-bit index, and a
|
||||
/// 64-bit index. Targets can use this however they like.
|
||||
TargetIndex,
|
||||
|
||||
/// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
|
||||
/// This node represents a target intrinsic function with no side effects.
|
||||
/// The first operand is the ID number of the intrinsic from the
|
||||
/// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
|
||||
/// node returns the result of the intrinsic.
|
||||
INTRINSIC_WO_CHAIN,
|
||||
|
||||
/// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
|
||||
/// This node represents a target intrinsic function with side effects that
|
||||
/// returns a result. The first operand is a chain pointer. The second is
|
||||
/// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
|
||||
/// operands to the intrinsic follow. The node has two results, the result
|
||||
/// of the intrinsic and an output chain.
|
||||
INTRINSIC_W_CHAIN,
|
||||
|
||||
/// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
|
||||
/// This node represents a target intrinsic function with side effects that
|
||||
/// does not return a result. The first operand is a chain pointer. The
|
||||
/// second is the ID number of the intrinsic from the llvm::Intrinsic
|
||||
/// namespace. The operands to the intrinsic follow.
|
||||
INTRINSIC_VOID,
|
||||
|
||||
/// CopyToReg - This node has three operands: a chain, a register number to
|
||||
/// set to this value, and a value.
|
||||
CopyToReg,
|
||||
|
||||
/// CopyFromReg - This node indicates that the input value is a virtual or
|
||||
/// physical register that is defined outside of the scope of this
|
||||
/// SelectionDAG. The register is available from the RegisterSDNode object.
|
||||
CopyFromReg,
|
||||
|
||||
/// UNDEF - An undefined node.
|
||||
UNDEF,
|
||||
|
||||
/// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
|
||||
/// a Constant, which is required to be operand #1) half of the integer or
|
||||
/// float value specified as operand #0. This is only for use before
|
||||
/// legalization, for values that will be broken into multiple registers.
|
||||
EXTRACT_ELEMENT,
|
||||
|
||||
/// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
|
||||
/// Given two values of the same integer value type, this produces a value
|
||||
/// twice as big. Like EXTRACT_ELEMENT, this can only be used before
|
||||
/// legalization.
|
||||
BUILD_PAIR,
|
||||
|
||||
/// MERGE_VALUES - This node takes multiple discrete operands and returns
|
||||
/// them all as its individual results. This node has exactly the same
/// number of inputs and outputs. This node is useful for some pieces of the
|
||||
/// code generator that want to think about a single node with multiple
|
||||
/// results, not multiple nodes.
|
||||
MERGE_VALUES,
|
||||
|
||||
/// Simple integer binary arithmetic operators.
|
||||
ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
|
||||
|
||||
/// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
|
||||
/// a signed/unsigned value of type i[2*N], and return the full value as
|
||||
/// two results, each of type iN.
|
||||
SMUL_LOHI, UMUL_LOHI,
|
||||
|
||||
/// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
|
||||
/// remainder result.
|
||||
SDIVREM, UDIVREM,
|
||||
|
||||
/// CARRY_FALSE - This node is used when folding other nodes,
|
||||
/// like ADDC/SUBC, which indicate the carry result is always false.
|
||||
CARRY_FALSE,
|
||||
|
||||
/// Carry-setting nodes for multiple precision addition and subtraction.
|
||||
/// These nodes take two operands of the same value type, and produce two
|
||||
/// results. The first result is the normal add or sub result, the second
|
||||
/// result is the carry flag result.
|
||||
ADDC, SUBC,
|
||||
|
||||
/// Carry-using nodes for multiple precision addition and subtraction. These
|
||||
/// nodes take three operands: The first two are the normal lhs and rhs to
|
||||
/// the add or sub, and the third is the input carry flag. These nodes
|
||||
/// produce two results; the normal result of the add or sub, and the output
|
||||
/// carry flag. These nodes both read and write a carry flag to allow them
/// to be chained together for add and sub of arbitrarily large
/// values.
|
||||
ADDE, SUBE,
|
||||
|
||||
/// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
|
||||
/// These nodes take two operands: the normal LHS and RHS to the add. They
|
||||
/// produce two results: the normal result of the add, and a boolean that
|
||||
/// indicates if an overflow occurred (*not* a flag, because it may be stored
/// to memory, etc.). If the type of the boolean is not i1 then the high
|
||||
/// bits conform to getBooleanContents.
|
||||
/// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
|
||||
SADDO, UADDO,
|
||||
|
||||
/// Same for subtraction.
|
||||
SSUBO, USUBO,
|
||||
|
||||
/// Same for multiplication.
|
||||
SMULO, UMULO,
|
||||
|
||||
/// Simple binary floating point operators.
|
||||
FADD, FSUB, FMUL, FMA, FDIV, FREM,
|
||||
|
||||
/// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
|
||||
/// DAG node does not require that X and Y have the same type, just that
/// they are both floating point. X and the result must have the same type.
/// FCOPYSIGN(f32, f64) is allowed.
|
||||
FCOPYSIGN,
|
||||
|
||||
/// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
|
||||
/// value as an integer 0/1 value.
|
||||
FGETSIGN,
|
||||
|
||||
/// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
|
||||
/// specified, possibly variable, elements. The number of elements is
|
||||
/// required to be a power of two. The types of the operands must all be
|
||||
/// the same and must match the vector element type, except that integer
|
||||
/// types are allowed to be larger than the element type, in which case
|
||||
/// the operands are implicitly truncated.
|
||||
BUILD_VECTOR,
|
||||
|
||||
/// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
|
||||
/// at IDX replaced with VAL. If the type of VAL is larger than the vector
|
||||
/// element type then VAL is truncated before replacement.
|
||||
INSERT_VECTOR_ELT,
|
||||
|
||||
/// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
|
||||
/// identified by the (potentially variable) element number IDX. If the
|
||||
/// return type is an integer type larger than the element type of the
|
||||
/// vector, the result is extended to the width of the return type.
|
||||
EXTRACT_VECTOR_ELT,
|
||||
|
||||
/// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
|
||||
/// vector type with the same length and element type, this produces a
|
||||
/// concatenated vector result value, with length equal to the sum of the
|
||||
/// lengths of the input vectors.
|
||||
CONCAT_VECTORS,
|
||||
|
||||
/// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector
|
||||
/// with VECTOR2 inserted into VECTOR1 at the (potentially
|
||||
/// variable) element number IDX, which must be a multiple of the
|
||||
/// VECTOR2 vector length. The elements of VECTOR1 starting at
|
||||
/// IDX are overwritten with VECTOR2. Elements IDX through
/// IDX + vector_length(VECTOR2) - 1 must be valid VECTOR1 indices.
INSERT_SUBVECTOR,
|
||||
|
||||
/// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
/// vector value) starting with the element number IDX, which must be a
|
||||
/// constant multiple of the result vector length.
|
||||
EXTRACT_SUBVECTOR,
|
||||
|
||||
/// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
|
||||
/// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
|
||||
/// values that indicate which value (or undef) each result element will
|
||||
/// get. These constant ints are accessible through the
|
||||
/// ShuffleVectorSDNode class. This is quite similar to the Altivec
|
||||
/// 'vperm' instruction, except that the indices must be constants and are
|
||||
/// in terms of the element size of VEC1/VEC2, not in terms of bytes.
|
||||
VECTOR_SHUFFLE,
|
||||
|
||||
/// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
|
||||
/// scalar value into element 0 of the resultant vector type. The top
|
||||
/// elements 1 to N-1 of the N-element vector are undefined. The type
|
||||
/// of the operand must match the vector element type, except when they
|
||||
/// are integer types. In this case the operand is allowed to be wider
|
||||
/// than the vector element type, and is implicitly truncated to it.
|
||||
SCALAR_TO_VECTOR,
|
||||
|
||||
/// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
|
||||
/// producing an unsigned/signed value of type i[2*N], then return the top
|
||||
/// part.
|
||||
MULHU, MULHS,
|
||||
|
||||
/// Bitwise operators - logical and, logical or, logical xor.
|
||||
AND, OR, XOR,
|
||||
|
||||
/// Shift and rotation operations. After legalization, the type of the
|
||||
/// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
|
||||
/// the shift amount can be any type, but care must be taken to ensure it is
|
||||
/// large enough. TLI.getShiftAmountTy() is i8 on some targets, but before
|
||||
/// legalization, types like i1024 can occur and i8 doesn't have enough bits
|
||||
/// to represent the shift amount.
|
||||
/// When the 1st operand is a vector, the shift amount must be in the same
|
||||
/// type. (TLI.getShiftAmountTy() will return the same type when the input
|
||||
/// type is a vector.)
|
||||
SHL, SRA, SRL, ROTL, ROTR,
|
||||
|
||||
/// Byte Swap and Counting operators.
|
||||
BSWAP, CTTZ, CTLZ, CTPOP,
|
||||
|
||||
/// Bit counting operators with an undefined result for zero inputs.
|
||||
CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
|
||||
|
||||
/// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
|
||||
/// i1 then the high bits must conform to getBooleanContents.
|
||||
SELECT,
|
||||
|
||||
/// Select with a vector condition (op #0) and two vector operands (ops #1
|
||||
/// and #2), returning a vector result. All vectors have the same length.
|
||||
/// Much like the scalar select and setcc, each bit in the condition selects
|
||||
/// whether the corresponding result element is taken from op #1 or op #2.
|
||||
/// At first, the VSELECT condition is of vXi1 type. Later, targets may
|
||||
/// change the condition type in order to match the VSELECT node using a
|
||||
/// pattern. The condition follows the BooleanContent format of the target.
|
||||
VSELECT,
|
||||
|
||||
/// Select with condition operator - This selects between a true value and
|
||||
/// a false value (ops #2 and #3) based on the boolean result of comparing
|
||||
/// the lhs and rhs (ops #0 and #1) of a conditional expression with the
|
||||
/// condition code in op #4, a CondCodeSDNode.
|
||||
SELECT_CC,
|
||||
|
||||
/// SetCC operator - This evaluates to a true value iff the condition is
|
||||
/// true. If the result value type is not i1 then the high bits conform
|
||||
/// to getBooleanContents. The operands to this are the left and right
|
||||
/// operands to compare (ops #0, and #1) and the condition code to compare
|
||||
/// them with (op #2) as a CondCodeSDNode. If the operands are vector types
|
||||
/// then the result type must also be a vector type.
|
||||
SETCC,
|
||||
|
||||
/// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
|
||||
/// integer shift operations, just like ADD/SUB_PARTS. The operation
|
||||
/// ordering is:
|
||||
/// [Lo,Hi] = op [LoLHS,HiLHS], Amt
|
||||
SHL_PARTS, SRA_PARTS, SRL_PARTS,
|
||||
|
||||
/// Conversion operators. These are all single input single output
|
||||
/// operations. For all of these, the result type must be strictly
|
||||
/// wider or narrower (depending on the operation) than the source
|
||||
/// type.
|
||||
|
||||
/// SIGN_EXTEND - Used for integer types, replicating the sign bit
|
||||
/// into new bits.
|
||||
SIGN_EXTEND,
|
||||
|
||||
/// ZERO_EXTEND - Used for integer types, zeroing the new bits.
|
||||
ZERO_EXTEND,
|
||||
|
||||
/// ANY_EXTEND - Used for integer types. The high bits are undefined.
|
||||
ANY_EXTEND,
|
||||
|
||||
/// TRUNCATE - Completely drop the high bits.
|
||||
TRUNCATE,
|
||||
|
||||
/// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
|
||||
/// depends on the first letter) to floating point.
|
||||
SINT_TO_FP,
|
||||
UINT_TO_FP,
|
||||
|
||||
/// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
|
||||
/// sign extend a small value in a large integer register (e.g. sign
|
||||
/// extending the low 8 bits of a 32-bit register to fill the top 24 bits
|
||||
/// with the 7th bit). The size of the smaller type is indicated by
/// operand #1, a ValueType node.
SIGN_EXTEND_INREG,
|
||||
|
||||
/// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
|
||||
/// integer.
|
||||
FP_TO_SINT,
|
||||
FP_TO_UINT,
|
||||
|
||||
/// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
|
||||
/// down to the precision of the destination VT. TRUNC is a flag, which is
|
||||
/// always an integer that is zero or one. If TRUNC is 0, this is a
|
||||
/// normal rounding; if it is 1, this FP_ROUND is known to not change the
/// value of Y.
|
||||
///
|
||||
/// The TRUNC = 1 case is used in cases where we know that the value will
|
||||
/// not be modified by the node, because Y is not using any of the extra
|
||||
/// precision of source type. This allows certain transformations like
|
||||
/// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
|
||||
/// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
|
||||
FP_ROUND,
|
||||
|
||||
/// FLT_ROUNDS_ - Returns current rounding mode:
|
||||
/// -1 Undefined
|
||||
/// 0 Round to 0
|
||||
/// 1 Round to nearest
|
||||
/// 2 Round to +inf
|
||||
/// 3 Round to -inf
|
||||
FLT_ROUNDS_,
|
||||
|
||||
/// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
|
||||
/// rounds it to a floating point value. It then promotes it and returns it
|
||||
/// in a register of the same size. This operation effectively just
|
||||
/// discards excess precision. The type to round down to is specified by
|
||||
/// the VT operand, a VTSDNode.
|
||||
FP_ROUND_INREG,
|
||||
|
||||
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
|
||||
FP_EXTEND,
|
||||
|
||||
/// BITCAST - This operator converts between integer, vector and FP
|
||||
/// values, as if the value was stored to memory with one type and loaded
|
||||
/// from the same address with the other type (or equivalently for vector
|
||||
/// format conversions, etc). The source and result are required to have
|
||||
/// the same bit size (e.g. f32 <-> i32). This can also be used for
|
||||
/// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
|
||||
/// getNode().
|
||||
BITCAST,
|
||||
|
||||
/// CONVERT_RNDSAT - This operator is used to support various conversions
|
||||
/// between various types (float, signed, unsigned and vectors of those
|
||||
/// types) with rounding and saturation. NOTE: Avoid using this operator as
|
||||
/// most targets don't support it and the operator might be removed in the
/// future. It takes the following arguments:
|
||||
/// 0) value
|
||||
/// 1) dest type (type to convert to)
|
||||
/// 2) src type (type to convert from)
|
||||
/// 3) rounding imm
|
||||
/// 4) saturation imm
|
||||
/// 5) ISD::CvtCode indicating the type of conversion to do
|
||||
CONVERT_RNDSAT,
|
||||
|
||||
/// FP16_TO_FP32, FP32_TO_FP16 - These operators are used to perform
|
||||
/// promotions and truncation for half-precision (16 bit) floating point
/// numbers. We need special nodes since FP16 is a storage-only type with
|
||||
/// special semantics of operations.
|
||||
FP16_TO_FP32, FP32_TO_FP16,
|
||||
|
||||
/// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
|
||||
/// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
|
||||
/// FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary
|
||||
/// floating point operations. These are inspired by libm.
|
||||
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
|
||||
FLOG, FLOG2, FLOG10, FEXP, FEXP2,
|
||||
FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR,
|
||||
|
||||
/// FSINCOS - Compute both fsin and fcos as a single operation.
|
||||
FSINCOS,
|
||||
|
||||
/// LOAD and STORE have token chains as their first operand, then the same
|
||||
/// operands as an LLVM load/store instruction, then an offset node that
|
||||
/// is added / subtracted from the base pointer to form the address (for
|
||||
/// indexed memory ops).
|
||||
LOAD, STORE,
|
||||
|
||||
/// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
|
||||
/// to a specified boundary. This node always has two return values: a new
|
||||
/// stack pointer value and a chain. The first operand is the token chain,
|
||||
/// the second is the number of bytes to allocate, and the third is the
|
||||
/// alignment boundary. The size is guaranteed to be a multiple of the
|
||||
/// stack alignment, and the alignment is guaranteed to be bigger than the
|
||||
/// stack alignment (if required) or 0 to get standard stack alignment.
|
||||
DYNAMIC_STACKALLOC,
|
||||
|
||||
/// Control flow instructions. These all have token chains.
|
||||
|
||||
/// BR - Unconditional branch. The first operand is the chain
|
||||
/// operand, the second is the MBB to branch to.
|
||||
BR,
|
||||
|
||||
/// BRIND - Indirect branch. The first operand is the chain, the second
|
||||
/// is the value to branch to, which must be of the same type as the
|
||||
/// target's pointer type.
|
||||
BRIND,
|
||||
|
||||
/// BR_JT - Jumptable branch. The first operand is the chain, the second
|
||||
/// is the jumptable index, the last one is the jumptable entry index.
|
||||
BR_JT,
|
||||
|
||||
/// BRCOND - Conditional branch. The first operand is the chain, the
|
||||
/// second is the condition, the third is the block to branch to if the
|
||||
/// condition is true. If the type of the condition is not i1, then the
|
||||
/// high bits must conform to getBooleanContents.
|
||||
BRCOND,
|
||||
|
||||
/// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
|
||||
/// that the condition is represented as condition code, and two nodes to
|
||||
/// compare, rather than as a combined SetCC node. The operands in order
|
||||
/// are chain, cc, lhs, rhs, block to branch to if condition is true.
|
||||
BR_CC,
|
||||
|
||||
/// INLINEASM - Represents an inline asm block. This node always has two
|
||||
/// return values: a chain and a flag result. The inputs are as follows:
|
||||
/// Operand #0 : Input chain.
|
||||
/// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
|
||||
/// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
|
||||
/// Operand #3 : HasSideEffect, IsAlignStack bits.
|
||||
/// After this, it is followed by a list of operands with this format:
|
||||
/// ConstantSDNode: Flags that encode whether it is a mem or not, the number
/// of operands that follow, etc. See InlineAsm.h.
|
||||
/// ... however many operands ...
|
||||
/// Operand #last: Optional, an incoming flag.
|
||||
///
|
||||
/// The variable width operands are required to represent target addressing
|
||||
/// modes as a single "operand", even though they may have multiple
|
||||
/// SDOperands.
|
||||
INLINEASM,
|
||||
|
||||
/// EH_LABEL - Represents a label in mid basic block used to track
|
||||
/// locations needed for debug and exception handling tables. These nodes
|
||||
/// take a chain as input and return a chain.
|
||||
EH_LABEL,
|
||||
|
||||
/// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
|
||||
/// value, the same type as the pointer type for the system, and an output
|
||||
/// chain.
|
||||
STACKSAVE,
|
||||
|
||||
/// STACKRESTORE has two operands, an input chain and a pointer to restore
|
||||
/// to; it returns an output chain.
STACKRESTORE,
|
||||
|
||||
/// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
|
||||
/// of a call sequence, and carry arbitrary information that the target might
/// want to know. The first operand is a chain, the rest are specified by
|
||||
/// the target and not touched by the DAG optimizers.
|
||||
/// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
|
||||
CALLSEQ_START, // Beginning of a call sequence
|
||||
CALLSEQ_END, // End of a call sequence
|
||||
|
||||
/// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
|
||||
/// and the alignment. It returns a pair of values: the vaarg value and a
|
||||
/// new chain.
|
||||
VAARG,
|
||||
|
||||
/// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
|
||||
/// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
|
||||
/// source.
|
||||
VACOPY,
|
||||
|
||||
/// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
|
||||
/// pointer, and a SRCVALUE.
|
||||
VAEND, VASTART,
|
||||
|
||||
/// SRCVALUE - This is a node type that holds a Value* that is used to
|
||||
/// make reference to a value in the LLVM IR.
|
||||
SRCVALUE,
|
||||
|
||||
/// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
/// reference metadata in the IR.
|
||||
MDNODE_SDNODE,
|
||||
|
||||
/// PCMARKER - This corresponds to the pcmarker intrinsic.
|
||||
PCMARKER,
|
||||
|
||||
/// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
|
||||
/// The only operand is a chain; a value and a chain are produced. The
/// value is the contents of the architecture-specific cycle-counter-like
/// register (or other high accuracy, low latency clock source).
READCYCLECOUNTER,
|
||||
|
||||
/// HANDLENODE node - Used as a handle for various purposes.
|
||||
HANDLENODE,
|
||||
|
||||
/// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
|
||||
/// takes as input a token chain, the pointer to the trampoline, the pointer
|
||||
/// to the nested function, the pointer to pass for the 'nest' parameter, a
|
||||
/// SRCVALUE for the trampoline and another for the nested function
|
||||
/// (allowing targets to access the original Function*).
|
||||
/// It produces a token chain as output.
|
||||
INIT_TRAMPOLINE,
|
||||
|
||||
/// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
|
||||
/// It takes a pointer to the trampoline and produces a (possibly) new
|
||||
/// pointer to the same trampoline with platform-specific adjustments
|
||||
/// applied. The pointer it returns points to an executable block of code.
|
||||
ADJUST_TRAMPOLINE,
|
||||
|
||||
/// TRAP - Trapping instruction
|
||||
TRAP,
|
||||
|
||||
/// DEBUGTRAP - Trap intended to get the attention of a debugger.
|
||||
DEBUGTRAP,
|
||||
|
||||
/// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
|
||||
/// is the chain. The other operands are the address to prefetch,
|
||||
/// read / write specifier, locality specifier and instruction / data cache
|
||||
/// specifier.
|
||||
PREFETCH,
|
||||
|
||||
/// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
/// This corresponds to the fence instruction. It takes an input chain, and
/// two integer constants: an AtomicOrdering and a SynchronizationScope.
ATOMIC_FENCE,

/// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
/// This corresponds to the "load atomic" instruction.
ATOMIC_LOAD,

/// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
/// This corresponds to the "store atomic" instruction.
ATOMIC_STORE,

/// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
/// This corresponds to the cmpxchg instruction.
ATOMIC_CMP_SWAP,

/// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
/// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
/// These correspond to the atomicrmw instruction.
ATOMIC_SWAP,
ATOMIC_LOAD_ADD,
ATOMIC_LOAD_SUB,
ATOMIC_LOAD_AND,
ATOMIC_LOAD_OR,
ATOMIC_LOAD_XOR,
ATOMIC_LOAD_NAND,
ATOMIC_LOAD_MIN,
ATOMIC_LOAD_MAX,
ATOMIC_LOAD_UMIN,
ATOMIC_LOAD_UMAX,

/// This corresponds to the llvm.lifetime.* intrinsics. The first operand
/// is the chain and the second operand is the alloca pointer.
LIFETIME_START, LIFETIME_END,

/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific pre-isel opcode values start here.
BUILTIN_OP_END
};

/// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
/// which do not reference a specific memory location should be less than
/// this value. Those that do must not be less than this value, and can
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+150;
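
Targets layer their own pre-isel opcodes on top of this numbering. A hypothetical target header might declare them as in the sketch below; the MYTARGETISD namespace and the node names are invented for illustration and are not part of these vendored headers:

#include "llvm/CodeGen/ISDOpcodes.h"

namespace llvm {
namespace MYTARGETISD {
  enum NodeType {
    // Nodes that do not reference memory are numbered from BUILTIN_OP_END.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,
    CALL,       // Target-specific call pseudo-node.
    RET_FLAG,   // Return glued to the value being returned.
    // Nodes that do reference memory must be numbered at or above
    // FIRST_TARGET_MEMORY_OPCODE so SelectionDAG::getMemIntrinsicNode
    // will accept them.
    LOAD_PAIR = ISD::FIRST_TARGET_MEMORY_OPCODE
  };
} // end MYTARGETISD namespace
} // end llvm namespace
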
//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
/// addressing modes.
///
/// UNINDEXED "Normal" load / store. The effective address is already
/// computed and is available in the base pointer. The offset
/// operand is always undefined. In addition to producing a
/// chain, an unindexed load produces one value (result of the
/// load); an unindexed store does not produce a value.
///
/// PRE_INC Similar to the unindexed mode where the effective address is
/// PRE_DEC the value of the base pointer plus / minus the offset.
/// It considers the computation as being folded into the load /
/// store operation (i.e. the load / store does the address
/// computation as well as performing the memory transaction).
/// The base operand is always undefined. In addition to
/// producing a chain, pre-indexed load produces two values
/// (result of the load and the result of the address
/// computation); a pre-indexed store produces one value (result
/// of the address computation).
///
/// POST_INC The effective address is the value of the base pointer. The
/// POST_DEC value of the offset operand is then added to / subtracted
/// from the base after the memory transaction. In addition to
/// producing a chain, post-indexed load produces two values
/// (the result of the load and the result of the base +/- offset
/// computation); a post-indexed store produces one value (the
/// result of the base +/- offset computation).
enum MemIndexedMode {
UNINDEXED = 0,
PRE_INC,
PRE_DEC,
POST_INC,
POST_DEC,
LAST_INDEXED_MODE
};

//===--------------------------------------------------------------------===//
/// LoadExtType enum - This enum defines the three variants of LOADEXT
/// (load with extension).
///
/// SEXTLOAD loads the integer operand and sign extends it to a larger
/// integer result type.
/// ZEXTLOAD loads the integer operand and zero extends it to a larger
/// integer result type.
/// EXTLOAD is used for two things: floating point extending loads and
/// integer extending loads [the top bits are undefined].
enum LoadExtType {
NON_EXTLOAD = 0,
EXTLOAD,
SEXTLOAD,
ZEXTLOAD,
LAST_LOADEXT_TYPE
};

//===--------------------------------------------------------------------===//
/// ISD::CondCode enum - These are ordered carefully to make the bitfields
/// below work out, when considering SETFALSE (something that never exists
/// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
/// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
/// to. If the "N" column is 1, the result of the comparison is undefined if
/// the input is a NAN.
///
/// All of these (except for the 'always folded ops') should be handled for
/// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
/// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
///
/// Note that these are laid out in a specific order to allow bit-twiddling
/// to transform conditions.
enum CondCode {
// Opcode N U L G E Intuitive operation
SETFALSE, // 0 0 0 0 Always false (always folded)
SETOEQ, // 0 0 0 1 True if ordered and equal
SETOGT, // 0 0 1 0 True if ordered and greater than
SETOGE, // 0 0 1 1 True if ordered and greater than or equal
SETOLT, // 0 1 0 0 True if ordered and less than
SETOLE, // 0 1 0 1 True if ordered and less than or equal
SETONE, // 0 1 1 0 True if ordered and operands are unequal
SETO, // 0 1 1 1 True if ordered (no nans)
SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
SETUEQ, // 1 0 0 1 True if unordered or equal
SETUGT, // 1 0 1 0 True if unordered or greater than
SETUGE, // 1 0 1 1 True if unordered, greater than, or equal
SETULT, // 1 1 0 0 True if unordered or less than
SETULE, // 1 1 0 1 True if unordered, less than, or equal
SETUNE, // 1 1 1 0 True if unordered or not equal
SETTRUE, // 1 1 1 1 Always true (always folded)
// Don't care operations: undefined if the input is a nan.
SETFALSE2, // 1 X 0 0 0 Always false (always folded)
SETEQ, // 1 X 0 0 1 True if equal
SETGT, // 1 X 0 1 0 True if greater than
SETGE, // 1 X 0 1 1 True if greater than or equal
SETLT, // 1 X 1 0 0 True if less than
SETLE, // 1 X 1 0 1 True if less than or equal
SETNE, // 1 X 1 1 0 True if not equal
SETTRUE2, // 1 X 1 1 1 Always true (always folded)

SETCC_INVALID // Marker value.
};
|
||||
|
||||
/// isSignedIntSetCC - Return true if this is a setcc instruction that
|
||||
/// performs a signed comparison when used with integer operands.
|
||||
inline bool isSignedIntSetCC(CondCode Code) {
|
||||
return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
|
||||
}
|
||||
|
||||
/// isUnsignedIntSetCC - Return true if this is a setcc instruction that
|
||||
/// performs an unsigned comparison when used with integer operands.
|
||||
inline bool isUnsignedIntSetCC(CondCode Code) {
|
||||
return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
|
||||
}
|
||||
|
||||
/// isTrueWhenEqual - Return true if the specified condition returns true if
|
||||
/// the two operands to the condition are equal. Note that if one of the two
|
||||
/// operands is a NaN, this value is meaningless.
|
||||
inline bool isTrueWhenEqual(CondCode Cond) {
|
||||
return ((int)Cond & 1) != 0;
|
||||
}
|
||||
|
||||
/// getUnorderedFlavor - This function returns 0 if the condition is always
|
||||
/// false if an operand is a NaN, 1 if the condition is always true if the
|
||||
/// operand is a NaN, and 2 if the condition is undefined if the operand is a
|
||||
/// NaN.
|
||||
inline unsigned getUnorderedFlavor(CondCode Cond) {
|
||||
return ((int)Cond >> 3) & 3;
|
||||
}
|
||||
|
||||
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
|
||||
/// 'op' is a valid SetCC operation.
|
||||
CondCode getSetCCInverse(CondCode Operation, bool isInteger);
|
||||
|
||||
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
|
||||
/// when given the operation for (X op Y).
|
||||
CondCode getSetCCSwappedOperands(CondCode Operation);
|
||||
|
||||
/// getSetCCOrOperation - Return the result of a logical OR between different
|
||||
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This
|
||||
/// function returns SETCC_INVALID if it is not possible to represent the
|
||||
/// resultant comparison.
|
||||
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
|
||||
|
||||
/// getSetCCAndOperation - Return the result of a logical AND between
|
||||
/// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
|
||||
/// function returns SETCC_INVALID if it is not possible to represent the
|
||||
/// resultant comparison.
|
||||
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
/// CvtCode enum - This enum defines the various converts CONVERT_RNDSAT
|
||||
/// supports.
|
||||
enum CvtCode {
|
||||
CVT_FF, /// Float from Float
|
||||
CVT_FS, /// Float from Signed
|
||||
CVT_FU, /// Float from Unsigned
|
||||
CVT_SF, /// Signed from Float
|
||||
CVT_UF, /// Unsigned from Float
|
||||
CVT_SS, /// Signed from Signed
|
||||
CVT_SU, /// Signed from Unsigned
|
||||
CVT_US, /// Unsigned from Signed
|
||||
CVT_UU, /// Unsigned from Unsigned
|
||||
CVT_INVALID /// Marker - Invalid opcode
|
||||
};
|
||||
|
||||
} // end llvm::ISD namespace
|
||||
|
||||
} // end llvm namespace
|
||||
|
||||
#endif
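
As a quick illustration of the bit layout the CondCode comments above describe (assuming these declarations come from the vendored llvm/CodeGen/ISDOpcodes.h header), the inline helpers can be exercised directly. This is an editor's sketch, not part of the vendored file:

#include "llvm/CodeGen/ISDOpcodes.h"
#include <cassert>

// Bit 0 of a CondCode is the E (equal) bit; bits 3-4 encode how the
// comparison treats NaN inputs, which is what getUnorderedFlavor extracts.
static void checkCondCodeBits() {
  using namespace llvm;
  assert(ISD::isTrueWhenEqual(ISD::SETOEQ));          // E bit set
  assert(!ISD::isTrueWhenEqual(ISD::SETONE));         // E bit clear
  assert(ISD::getUnorderedFlavor(ISD::SETOLT) == 0);  // ordered: false on NaN
  assert(ISD::getUnorderedFlavor(ISD::SETUEQ) == 1);  // unordered: true on NaN
  assert(ISD::getUnorderedFlavor(ISD::SETLT) == 2);   // don't care on NaN
  assert(ISD::isSignedIntSetCC(ISD::SETLT) && ISD::isUnsignedIntSetCC(ISD::SETULT));
}
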
|
||||
59
thirdparty/clang/include/llvm/CodeGen/IntrinsicLowering.h
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the IntrinsicLowering interface. This interface allows
|
||||
// addition of domain-specific or front-end specific intrinsics to LLVM without
|
||||
// having to modify all of the C backend or interpreter.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
|
||||
#define LLVM_CODEGEN_INTRINSICLOWERING_H
|
||||
|
||||
#include "llvm/IR/Intrinsics.h"
|
||||
|
||||
namespace llvm {
|
||||
class CallInst;
|
||||
class Module;
|
||||
class DataLayout;
|
||||
|
||||
class IntrinsicLowering {
|
||||
const DataLayout& TD;
|
||||
|
||||
|
||||
bool Warned;
|
||||
public:
|
||||
explicit IntrinsicLowering(const DataLayout &td) :
|
||||
TD(td), Warned(false) {}
|
||||
|
||||
/// AddPrototypes - This method, if called, causes all of the prototypes
|
||||
/// that might be needed by an intrinsic lowering implementation to be
|
||||
/// inserted into the module specified.
|
||||
void AddPrototypes(Module &M);
|
||||
|
||||
/// LowerIntrinsicCall - This method replaces a call with the LLVM function
|
||||
/// which should be used to implement the specified intrinsic function call.
|
||||
/// If an intrinsic function must be implemented by the code generator
|
||||
/// (such as va_start), this function should print a message and abort.
|
||||
///
|
||||
/// Otherwise, if an intrinsic function call can be lowered, the code to
|
||||
/// implement it (often a call to a non-intrinsic function) is inserted
|
||||
/// _after_ the call instruction and the call is deleted. The caller must
|
||||
/// be capable of handling this kind of change.
|
||||
///
|
||||
void LowerIntrinsicCall(CallInst *CI);
|
||||
|
||||
/// LowerToByteSwap - Replace a call instruction with a call to the bswap
/// intrinsic. Return false if it has determined the call is not a
|
||||
/// simple integer bswap.
|
||||
static bool LowerToByteSwap(CallInst *CI);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
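
A rough sketch of how a client (for example an interpreter-style execution engine) might drive this interface; the iteration pattern below is an assumption for illustration and is not part of the vendored header. LowerIntrinsicCall deletes the call it lowers, so the iterator is advanced first:

#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"

static void lowerIntrinsicsIn(llvm::Module &M, llvm::Function &F) {
  llvm::DataLayout TD(&M);
  llvm::IntrinsicLowering IL(TD);
  IL.AddPrototypes(M);  // make sure any helper prototypes exist first

  for (llvm::Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
    for (llvm::BasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
      llvm::Instruction *Inst = &*I++;          // advance before lowering
      llvm::CallInst *CI = llvm::dyn_cast<llvm::CallInst>(Inst);
      if (CI && CI->getCalledFunction() &&
          CI->getCalledFunction()->isIntrinsic())
        IL.LowerIntrinsicCall(CI);              // replaces and erases the call
    }
}
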
|
||||
341
thirdparty/clang/include/llvm/CodeGen/JITCodeEmitter.h
vendored
Normal file
@@ -0,0 +1,341 @@
|
||||
//===-- llvm/CodeGen/JITCodeEmitter.h - Code emission ----------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines an abstract interface that is used by the machine code
|
||||
// emission framework to output the code. This allows machine code emission to
|
||||
// be separated from concerns such as resolution of call targets, and where the
|
||||
// machine code will be written (memory or disk, for example).
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_JITCODEEMITTER_H
|
||||
#define LLVM_CODEGEN_JITCODEEMITTER_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/CodeGen/MachineCodeEmitter.h"
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include "llvm/Support/MathExtras.h"
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MachineBasicBlock;
|
||||
class MachineConstantPool;
|
||||
class MachineJumpTableInfo;
|
||||
class MachineFunction;
|
||||
class MachineModuleInfo;
|
||||
class MachineRelocation;
|
||||
class Value;
|
||||
class GlobalValue;
|
||||
class Function;
|
||||
|
||||
/// JITCodeEmitter - This class defines two sorts of methods: those for
|
||||
/// emitting the actual bytes of machine code, and those for emitting auxiliary
|
||||
/// structures, such as jump tables, relocations, etc.
|
||||
///
|
||||
/// Emission of machine code is complicated by the fact that we don't (in
|
||||
/// general) know the size of the machine code that we're about to emit before
|
||||
/// we emit it. As such, we preallocate a certain amount of memory, and set the
|
||||
/// BufferBegin/BufferEnd pointers to the start and end of the buffer. As we
|
||||
/// emit machine instructions, we advance the CurBufferPtr to indicate the
|
||||
/// location of the next byte to emit. In the case of a buffer overflow (we
|
||||
/// need to emit more machine code than we have allocated space for), the
|
||||
/// CurBufferPtr will saturate to BufferEnd and ignore stores. Once the entire
|
||||
/// function has been emitted, the overflow condition is checked, and if it has
|
||||
/// occurred, more memory is allocated, and we reemit the code into it.
|
||||
///
|
||||
class JITCodeEmitter : public MachineCodeEmitter {
|
||||
virtual void anchor();
|
||||
public:
|
||||
virtual ~JITCodeEmitter() {}
|
||||
|
||||
/// startFunction - This callback is invoked when the specified function is
|
||||
/// about to be code generated. This initializes the BufferBegin/End/Ptr
|
||||
/// fields.
|
||||
///
|
||||
virtual void startFunction(MachineFunction &F) = 0;
|
||||
|
||||
/// finishFunction - This callback is invoked when the specified function has
|
||||
/// finished code generation. If a buffer overflow has occurred, this method
|
||||
/// returns true (the callee is required to try again), otherwise it returns
|
||||
/// false.
|
||||
///
|
||||
virtual bool finishFunction(MachineFunction &F) = 0;
|
||||
|
||||
/// allocIndirectGV - Allocates and fills storage for an indirect
|
||||
/// GlobalValue, and returns the address.
|
||||
virtual void *allocIndirectGV(const GlobalValue *GV,
|
||||
const uint8_t *Buffer, size_t Size,
|
||||
unsigned Alignment) = 0;
|
||||
|
||||
/// emitByte - This callback is invoked when a byte needs to be written to the
|
||||
/// output stream.
|
||||
///
|
||||
void emitByte(uint8_t B) {
|
||||
if (CurBufferPtr != BufferEnd)
|
||||
*CurBufferPtr++ = B;
|
||||
}
|
||||
|
||||
/// emitWordLE - This callback is invoked when a 32-bit word needs to be
|
||||
/// written to the output stream in little-endian format.
|
||||
///
|
||||
void emitWordLE(uint32_t W) {
|
||||
if (4 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitWordBE - This callback is invoked when a 32-bit word needs to be
|
||||
/// written to the output stream in big-endian format.
|
||||
///
|
||||
void emitWordBE(uint32_t W) {
|
||||
if (4 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitDWordLE - This callback is invoked when a 64-bit word needs to be
|
||||
/// written to the output stream in little-endian format.
|
||||
///
|
||||
void emitDWordLE(uint64_t W) {
|
||||
if (8 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 32);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 40);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 48);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 56);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitDWordBE - This callback is invoked when a 64-bit word needs to be
|
||||
/// written to the output stream in big-endian format.
|
||||
///
|
||||
void emitDWordBE(uint64_t W) {
|
||||
if (8 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 56);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 48);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 40);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 32);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitAlignment - Move the CurBufferPtr pointer up to the specified
|
||||
/// alignment (saturated to BufferEnd of course).
|
||||
void emitAlignment(unsigned Alignment) {
|
||||
if (Alignment == 0) Alignment = 1;
|
||||
uint8_t *NewPtr = (uint8_t*)RoundUpToAlignment((uintptr_t)CurBufferPtr,
|
||||
Alignment);
|
||||
CurBufferPtr = std::min(NewPtr, BufferEnd);
|
||||
}
|
||||
|
||||
/// emitAlignmentWithFill - Similar to emitAlignment, except that the
|
||||
/// extra bytes are filled with the provided byte.
|
||||
void emitAlignmentWithFill(unsigned Alignment, uint8_t Fill) {
|
||||
if (Alignment == 0) Alignment = 1;
|
||||
uint8_t *NewPtr = (uint8_t*)RoundUpToAlignment((uintptr_t)CurBufferPtr,
|
||||
Alignment);
|
||||
// Fail if we don't have room.
|
||||
if (NewPtr > BufferEnd) {
|
||||
CurBufferPtr = BufferEnd;
|
||||
return;
|
||||
}
|
||||
while (CurBufferPtr < NewPtr) {
|
||||
*CurBufferPtr++ = Fill;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
|
||||
/// written to the output stream.
|
||||
void emitULEB128Bytes(uint64_t Value, unsigned PadTo = 0) {
|
||||
do {
|
||||
uint8_t Byte = Value & 0x7f;
|
||||
Value >>= 7;
|
||||
if (Value || PadTo != 0) Byte |= 0x80;
|
||||
emitByte(Byte);
|
||||
} while (Value);
|
||||
|
||||
if (PadTo) {
|
||||
do {
|
||||
uint8_t Byte = (PadTo > 1) ? 0x80 : 0x0;
|
||||
emitByte(Byte);
|
||||
} while (--PadTo);
|
||||
}
|
||||
}
|
||||
|
||||
/// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
|
||||
/// written to the output stream.
|
||||
void emitSLEB128Bytes(int64_t Value) {
|
||||
int32_t Sign = Value >> (8 * sizeof(Value) - 1);
|
||||
bool IsMore;
|
||||
|
||||
do {
|
||||
uint8_t Byte = Value & 0x7f;
|
||||
Value >>= 7;
|
||||
IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
|
||||
if (IsMore) Byte |= 0x80;
|
||||
emitByte(Byte);
|
||||
} while (IsMore);
|
||||
}
|
||||
|
||||
/// emitString - This callback is invoked when a String needs to be
|
||||
/// written to the output stream.
|
||||
void emitString(const std::string &String) {
|
||||
for (size_t i = 0, N = String.size(); i < N; ++i) {
|
||||
uint8_t C = String[i];
|
||||
emitByte(C);
|
||||
}
|
||||
emitByte(0);
|
||||
}
|
||||
|
||||
/// emitInt32 - Emit an int32 directive.
void emitInt32(uint32_t Value) {
|
||||
if (4 <= BufferEnd-CurBufferPtr) {
|
||||
*((uint32_t*)CurBufferPtr) = Value;
|
||||
CurBufferPtr += 4;
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitInt64 - Emit an int64 directive.
void emitInt64(uint64_t Value) {
|
||||
if (8 <= BufferEnd-CurBufferPtr) {
|
||||
*((uint64_t*)CurBufferPtr) = Value;
|
||||
CurBufferPtr += 8;
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitInt32At - Emit the Int32 Value in Addr.
|
||||
void emitInt32At(uintptr_t *Addr, uintptr_t Value) {
|
||||
if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
|
||||
(*(uint32_t*)Addr) = (uint32_t)Value;
|
||||
}
|
||||
|
||||
/// emitInt64At - Emit the Int64 Value in Addr.
|
||||
void emitInt64At(uintptr_t *Addr, uintptr_t Value) {
|
||||
if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
|
||||
(*(uint64_t*)Addr) = (uint64_t)Value;
|
||||
}
|
||||
|
||||
|
||||
/// emitLabel - Emits a label
|
||||
virtual void emitLabel(MCSymbol *Label) = 0;
|
||||
|
||||
/// allocateSpace - Allocate a block of space in the current output buffer,
|
||||
/// returning null (and setting conditions to indicate buffer overflow) on
|
||||
/// failure. Alignment is the alignment in bytes of the buffer desired.
|
||||
virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) {
|
||||
emitAlignment(Alignment);
|
||||
void *Result;
|
||||
|
||||
// Check for buffer overflow.
|
||||
if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
|
||||
CurBufferPtr = BufferEnd;
|
||||
Result = 0;
|
||||
} else {
|
||||
// Allocate the space.
|
||||
Result = CurBufferPtr;
|
||||
CurBufferPtr += Size;
|
||||
}
|
||||
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
|
||||
/// this method does not allocate memory in the current output buffer,
|
||||
/// because a global may live longer than the current function.
|
||||
virtual void *allocateGlobal(uintptr_t Size, unsigned Alignment) = 0;
|
||||
|
||||
/// StartMachineBasicBlock - This should be called by the target when a new
|
||||
/// basic block is about to be emitted. This way the MCE knows where the
|
||||
/// start of the block is, and can implement getMachineBasicBlockAddress.
|
||||
virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) = 0;
|
||||
|
||||
/// getCurrentPCValue - This returns the address that the next emitted byte
|
||||
/// will be output to.
|
||||
///
|
||||
virtual uintptr_t getCurrentPCValue() const {
|
||||
return (uintptr_t)CurBufferPtr;
|
||||
}
|
||||
|
||||
/// getCurrentPCOffset - Return the offset from the start of the emitted
|
||||
/// buffer that we are currently writing to.
|
||||
uintptr_t getCurrentPCOffset() const {
|
||||
return CurBufferPtr-BufferBegin;
|
||||
}
|
||||
|
||||
/// earlyResolveAddresses - True if the code emitter can use symbol addresses
|
||||
/// during code emission time. The JIT is capable of doing this because it
|
||||
/// creates jump tables or constant pools in memory on the fly, while the
/// object code emitters rely on a linker to have real addresses and should
|
||||
/// use relocations instead.
|
||||
bool earlyResolveAddresses() const { return true; }
|
||||
|
||||
/// addRelocation - Whenever a relocatable address is needed, it should be
|
||||
/// noted with this interface.
|
||||
virtual void addRelocation(const MachineRelocation &MR) = 0;
|
||||
|
||||
/// FIXME: These should all be handled with relocations!
|
||||
|
||||
/// getConstantPoolEntryAddress - Return the address of the 'Index' entry in
|
||||
/// the constant pool that was last emitted with the emitConstantPool method.
|
||||
///
|
||||
virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const = 0;
|
||||
|
||||
/// getJumpTableEntryAddress - Return the address of the jump table with index
|
||||
/// 'Index' in the function that last called initJumpTableInfo.
|
||||
///
|
||||
virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const = 0;
|
||||
|
||||
/// getMachineBasicBlockAddress - Return the address of the specified
|
||||
/// MachineBasicBlock, only usable after the label for the MBB has been
|
||||
/// emitted.
|
||||
///
|
||||
virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0;
|
||||
|
||||
/// getLabelAddress - Return the address of the specified Label, only usable
|
||||
/// after the Label has been emitted.
|
||||
///
|
||||
virtual uintptr_t getLabelAddress(MCSymbol *Label) const = 0;
|
||||
|
||||
/// Specifies the MachineModuleInfo object. This is used for exception handling
|
||||
/// purposes.
|
||||
virtual void setModuleInfo(MachineModuleInfo* Info) = 0;
|
||||
|
||||
/// getLabelLocations - Return the label locations map of the label IDs to
|
||||
/// their address.
|
||||
virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() { return 0; }
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
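
The two LEB128 emitters above implement the standard DWARF variable-length encodings (emitULEB128Bytes additionally supports zero-padding, which this sketch omits). Below is a self-contained version of the same encoding, handy for checking expected byte sequences without writing a JITCodeEmitter subclass; the function names are made up for illustration:

#include <cstdint>
#include <vector>

// Unsigned LEB128: emit 7 bits per byte, low bits first; the high bit of each
// byte says whether another byte follows. 624485 encodes as E5 8E 26.
static std::vector<uint8_t> encodeULEB128(uint64_t Value) {
  std::vector<uint8_t> Out;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value) Byte |= 0x80;
    Out.push_back(Byte);
  } while (Value);
  return Out;
}

// Signed LEB128: the same 7-bit groups, but the loop stops once the remaining
// value is all sign bits and the sign bit of the last emitted group matches.
static std::vector<uint8_t> encodeSLEB128(int64_t Value) {
  std::vector<uint8_t> Out;
  bool More;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;                       // arithmetic shift keeps the sign
    More = !((Value == 0 && (Byte & 0x40) == 0) ||
             (Value == -1 && (Byte & 0x40) != 0));
    if (More) Byte |= 0x80;
    Out.push_back(Byte);
  } while (More);
  return Out;
}
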
|
||||
100
thirdparty/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file declares the LatencyPriorityQueue class, which is a
|
||||
// SchedulingPriorityQueue that schedules using latency information to
|
||||
// reduce the length of the critical path through the basic block.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
|
||||
#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
|
||||
|
||||
#include "llvm/CodeGen/ScheduleDAG.h"
|
||||
|
||||
namespace llvm {
|
||||
class LatencyPriorityQueue;
|
||||
|
||||
/// Sorting functions for the Available queue.
|
||||
struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
|
||||
LatencyPriorityQueue *PQ;
|
||||
explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
|
||||
|
||||
bool operator()(const SUnit* left, const SUnit* right) const;
|
||||
};
|
||||
|
||||
class LatencyPriorityQueue : public SchedulingPriorityQueue {
|
||||
// SUnits - The SUnits for the current graph.
|
||||
std::vector<SUnit> *SUnits;
|
||||
|
||||
/// NumNodesSolelyBlocking - This vector contains, for every node in the
|
||||
/// Queue, the number of nodes that the node is the sole unscheduled
|
||||
/// predecessor for. This is used as a tie-breaker heuristic for better
|
||||
/// mobility.
|
||||
std::vector<unsigned> NumNodesSolelyBlocking;
|
||||
|
||||
/// Queue - The queue.
|
||||
std::vector<SUnit*> Queue;
|
||||
latency_sort Picker;
|
||||
|
||||
public:
|
||||
LatencyPriorityQueue() : Picker(this) {
|
||||
}
|
||||
|
||||
bool isBottomUp() const { return false; }
|
||||
|
||||
void initNodes(std::vector<SUnit> &sunits) {
|
||||
SUnits = &sunits;
|
||||
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
|
||||
}
|
||||
|
||||
void addNode(const SUnit *SU) {
|
||||
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
|
||||
}
|
||||
|
||||
void updateNode(const SUnit *SU) {
|
||||
}
|
||||
|
||||
void releaseState() {
|
||||
SUnits = 0;
|
||||
}
|
||||
|
||||
unsigned getLatency(unsigned NodeNum) const {
|
||||
assert(NodeNum < (*SUnits).size());
|
||||
return (*SUnits)[NodeNum].getHeight();
|
||||
}
|
||||
|
||||
unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
|
||||
assert(NodeNum < NumNodesSolelyBlocking.size());
|
||||
return NumNodesSolelyBlocking[NodeNum];
|
||||
}
|
||||
|
||||
bool empty() const { return Queue.empty(); }
|
||||
|
||||
virtual void push(SUnit *U);
|
||||
|
||||
virtual SUnit *pop();
|
||||
|
||||
virtual void remove(SUnit *SU);
|
||||
|
||||
virtual void dump(ScheduleDAG* DAG) const;
|
||||
|
||||
// scheduledNode - As nodes are scheduled, we look to see if there are any
|
||||
// successor nodes that have a single unscheduled predecessor. If so, that
|
||||
// single predecessor has a higher priority, since scheduling it will make
|
||||
// the node available.
|
||||
void scheduledNode(SUnit *Node);
|
||||
|
||||
private:
|
||||
void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
|
||||
SUnit *getSingleUnscheduledPred(SUnit *SU);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
248
thirdparty/clang/include/llvm/CodeGen/LexicalScopes.h
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
//===- LexicalScopes.h - Collecting lexical scope info ---------*- C++ -*-===//
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements LexicalScopes analysis.
|
||||
//
|
||||
// This pass collects lexical scope information and maps machine instructions
|
||||
// to respective lexical scopes.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LEXICALSCOPES_H
|
||||
#define LLVM_CODEGEN_LEXICALSCOPES_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/SmallPtrSet.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/IR/Metadata.h"
|
||||
#include "llvm/Support/DebugLoc.h"
|
||||
#include "llvm/Support/ValueHandle.h"
|
||||
#include <utility>
|
||||
namespace llvm {
|
||||
|
||||
class MachineInstr;
|
||||
class MachineBasicBlock;
|
||||
class MachineFunction;
|
||||
class LexicalScope;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
/// InsnRange - This is used to track a range of instructions with identical
/// lexical scope.
|
||||
///
|
||||
typedef std::pair<const MachineInstr *, const MachineInstr *> InsnRange;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
/// LexicalScopes - This class provides an interface to collect and use
/// lexical scoping information from machine instructions.
///
|
||||
class LexicalScopes {
|
||||
public:
|
||||
LexicalScopes() : MF(NULL), CurrentFnLexicalScope(NULL) { }
|
||||
virtual ~LexicalScopes();
|
||||
|
||||
/// initialize - Scan machine function and construct lexical scope nest.
virtual void initialize(const MachineFunction &);
|
||||
|
||||
/// releaseMemory - release memory.
|
||||
virtual void releaseMemory();
|
||||
|
||||
/// empty - Return true if there is no lexical scope information available.
bool empty() { return CurrentFnLexicalScope == NULL; }
|
||||
|
||||
/// isCurrentFunctionScope - Return true if given lexical scope represents
|
||||
/// current function.
|
||||
bool isCurrentFunctionScope(const LexicalScope *LS) {
|
||||
return LS == CurrentFnLexicalScope;
|
||||
}
|
||||
|
||||
/// getCurrentFunctionScope - Return lexical scope for the current function.
|
||||
LexicalScope *getCurrentFunctionScope() const { return CurrentFnLexicalScope;}
|
||||
|
||||
/// getMachineBasicBlocks - Populate given set using machine basic blocks
|
||||
/// which have machine instructions that belong to lexical scope identified by
|
||||
/// DebugLoc.
|
||||
void getMachineBasicBlocks(DebugLoc DL,
|
||||
SmallPtrSet<const MachineBasicBlock*, 4> &MBBs);
|
||||
|
||||
/// dominates - Return true if DebugLoc's lexical scope dominates at least one
|
||||
/// machine instruction's lexical scope in a given machine basic block.
|
||||
bool dominates(DebugLoc DL, MachineBasicBlock *MBB);
|
||||
|
||||
/// findLexicalScope - Find lexical scope, either regular or inlined, for the
|
||||
/// given DebugLoc. Return NULL if not found.
|
||||
LexicalScope *findLexicalScope(DebugLoc DL);
|
||||
|
||||
/// getAbstractScopesList - Return a reference to list of abstract scopes.
|
||||
ArrayRef<LexicalScope *> getAbstractScopesList() const {
|
||||
return AbstractScopesList;
|
||||
}
|
||||
|
||||
/// findAbstractScope - Find an abstract scope or return NULL.
|
||||
LexicalScope *findAbstractScope(const MDNode *N) {
|
||||
return AbstractScopeMap.lookup(N);
|
||||
}
|
||||
|
||||
/// findInlinedScope - Find an inlined scope for the given DebugLoc or return
|
||||
/// NULL.
|
||||
LexicalScope *findInlinedScope(DebugLoc DL) {
|
||||
return InlinedLexicalScopeMap.lookup(DL);
|
||||
}
|
||||
|
||||
/// findLexicalScope - Find regular lexical scope or return NULL.
|
||||
LexicalScope *findLexicalScope(const MDNode *N) {
|
||||
return LexicalScopeMap.lookup(N);
|
||||
}
|
||||
|
||||
/// dump - Print data structures to dbgs().
|
||||
void dump();
|
||||
|
||||
private:
|
||||
|
||||
/// getOrCreateLexicalScope - Find lexical scope for the given DebugLoc. If
|
||||
/// not available then create new lexical scope.
|
||||
LexicalScope *getOrCreateLexicalScope(DebugLoc DL);
|
||||
|
||||
/// getOrCreateRegularScope - Find or create a regular lexical scope.
|
||||
LexicalScope *getOrCreateRegularScope(MDNode *Scope);
|
||||
|
||||
/// getOrCreateInlinedScope - Find or create an inlined lexical scope.
|
||||
LexicalScope *getOrCreateInlinedScope(MDNode *Scope, MDNode *InlinedAt);
|
||||
|
||||
/// getOrCreateAbstractScope - Find or create an abstract lexical scope.
|
||||
LexicalScope *getOrCreateAbstractScope(const MDNode *N);
|
||||
|
||||
  /// extractLexicalScopes - Extract instruction ranges for each lexical scope
  /// in the given machine function.
|
||||
void extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
|
||||
DenseMap<const MachineInstr *, LexicalScope *> &M);
|
||||
void constructScopeNest(LexicalScope *Scope);
|
||||
void assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
|
||||
DenseMap<const MachineInstr *, LexicalScope *> &M);
|
||||
|
||||
private:
|
||||
const MachineFunction *MF;
|
||||
|
||||
/// LexicalScopeMap - Tracks the scopes in the current function. Owns the
|
||||
/// contained LexicalScope*s.
|
||||
DenseMap<const MDNode *, LexicalScope *> LexicalScopeMap;
|
||||
|
||||
/// InlinedLexicalScopeMap - Tracks inlined function scopes in current function.
|
||||
DenseMap<DebugLoc, LexicalScope *> InlinedLexicalScopeMap;
|
||||
|
||||
  /// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
|
||||
/// AbstractScopes owns its LexicalScope*s.
|
||||
DenseMap<const MDNode *, LexicalScope *> AbstractScopeMap;
|
||||
|
||||
/// AbstractScopesList - Tracks abstract scopes constructed while processing
|
||||
/// a function.
|
||||
  SmallVector<LexicalScope *, 4> AbstractScopesList;
|
||||
|
||||
/// CurrentFnLexicalScope - Top level scope for the current function.
|
||||
///
|
||||
LexicalScope *CurrentFnLexicalScope;
|
||||
};
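As a usage illustration (not part of the original header): a debug-info consumer might drive the interface above roughly as follows. This is a minimal sketch that assumes a MachineFunction MF and a DebugLoc DL are already available, for example inside a MachineFunctionPass, and it uses only the methods declared above.

#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/ADT/SmallPtrSet.h"

using namespace llvm;

// Sketch: report which basic blocks carry code for the scope of DL.
static void inspectScope(const MachineFunction &MF, DebugLoc DL) {
  LexicalScopes LScopes;
  LScopes.initialize(MF);                 // build the scope nest for MF
  if (LScopes.empty())
    return;                               // no lexical scope information

  LexicalScope *S = LScopes.findLexicalScope(DL);
  if (!S || LScopes.isCurrentFunctionScope(S))
    return;                               // unknown scope, or the whole function

  SmallPtrSet<const MachineBasicBlock *, 4> MBBs;
  LScopes.getMachineBasicBlocks(DL, MBBs);
  // MBBs now contains every block with at least one instruction in S's scope.
}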
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
/// LexicalScope - This class is used to track scope information.
|
||||
///
|
||||
class LexicalScope {
|
||||
virtual void anchor();
|
||||
|
||||
public:
|
||||
LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
|
||||
: Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
|
||||
LastInsn(0), FirstInsn(0), DFSIn(0), DFSOut(0) {
|
||||
if (Parent)
|
||||
Parent->addChild(this);
|
||||
}
|
||||
|
||||
virtual ~LexicalScope() {}
|
||||
|
||||
// Accessors.
|
||||
LexicalScope *getParent() const { return Parent; }
|
||||
const MDNode *getDesc() const { return Desc; }
|
||||
const MDNode *getInlinedAt() const { return InlinedAtLocation; }
|
||||
const MDNode *getScopeNode() const { return Desc; }
|
||||
bool isAbstractScope() const { return AbstractScope; }
|
||||
SmallVector<LexicalScope *, 4> &getChildren() { return Children; }
|
||||
SmallVector<InsnRange, 4> &getRanges() { return Ranges; }
|
||||
|
||||
/// addChild - Add a child scope.
|
||||
void addChild(LexicalScope *S) { Children.push_back(S); }
|
||||
|
||||
/// openInsnRange - This scope covers instruction range starting from MI.
|
||||
void openInsnRange(const MachineInstr *MI) {
|
||||
if (!FirstInsn)
|
||||
FirstInsn = MI;
|
||||
|
||||
if (Parent)
|
||||
Parent->openInsnRange(MI);
|
||||
}
|
||||
|
||||
/// extendInsnRange - Extend the current instruction range covered by
|
||||
/// this scope.
|
||||
void extendInsnRange(const MachineInstr *MI) {
|
||||
assert (FirstInsn && "MI Range is not open!");
|
||||
LastInsn = MI;
|
||||
if (Parent)
|
||||
Parent->extendInsnRange(MI);
|
||||
}
|
||||
|
||||
/// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
|
||||
/// until now. This is used when a new scope is encountered while walking
|
||||
/// machine instructions.
|
||||
void closeInsnRange(LexicalScope *NewScope = NULL) {
|
||||
assert (LastInsn && "Last insn missing!");
|
||||
Ranges.push_back(InsnRange(FirstInsn, LastInsn));
|
||||
FirstInsn = NULL;
|
||||
LastInsn = NULL;
|
||||
// If Parent dominates NewScope then do not close Parent's instruction
|
||||
// range.
|
||||
if (Parent && (!NewScope || !Parent->dominates(NewScope)))
|
||||
Parent->closeInsnRange(NewScope);
|
||||
}
|
||||
|
||||
/// dominates - Return true if current scope dominates given lexical scope.
|
||||
bool dominates(const LexicalScope *S) const {
|
||||
if (S == this)
|
||||
return true;
|
||||
if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Depth First Search support to walk and manipulate LexicalScope hierarchy.
|
||||
unsigned getDFSOut() const { return DFSOut; }
|
||||
void setDFSOut(unsigned O) { DFSOut = O; }
|
||||
unsigned getDFSIn() const { return DFSIn; }
|
||||
void setDFSIn(unsigned I) { DFSIn = I; }
|
||||
|
||||
/// dump - print lexical scope.
|
||||
void dump(unsigned Indent = 0) const;
|
||||
|
||||
private:
|
||||
LexicalScope *Parent; // Parent to this scope.
|
||||
AssertingVH<const MDNode> Desc; // Debug info descriptor.
|
||||
AssertingVH<const MDNode> InlinedAtLocation; // Location at which this
|
||||
// scope is inlined.
|
||||
bool AbstractScope; // Abstract Scope
|
||||
SmallVector<LexicalScope *, 4> Children; // Scopes defined in scope.
|
||||
// Contents not owned.
|
||||
SmallVector<InsnRange, 4> Ranges;
|
||||
|
||||
const MachineInstr *LastInsn; // Last instruction of this scope.
|
||||
const MachineInstr *FirstInsn; // First instruction of this scope.
|
||||
  unsigned DFSIn, DFSOut;                 // In & Out depth, used to determine
                                          // scope nesting.
|
||||
};
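The dominates() check above is the standard DFS-interval test: number the scope tree with depth-first entry/exit times, and an ancestor's [DFSIn, DFSOut] window encloses every descendant's. A small self-contained illustration of just that numbering idea (toy types, not LLVM API):

#include <cassert>

// Toy node carrying DFS entry/exit numbers, mirroring DFSIn/DFSOut above.
struct ToyScope { unsigned DFSIn, DFSOut; };

// A encloses (dominates) B iff A was entered before B and left after it.
static bool encloses(const ToyScope &A, const ToyScope &B) {
  return A.DFSIn < B.DFSIn && A.DFSOut > B.DFSOut;
}

int main() {
  //        Root(1,8)
  //        /        \
  //   Child(2,5)   Sibling(6,7)
  //       |
  //   Grand(3,4)
  ToyScope Root = {1, 8}, Child = {2, 5}, Grand = {3, 4}, Sibling = {6, 7};
  assert(encloses(Root, Grand));      // ancestor encloses descendant
  assert(encloses(Child, Grand));
  assert(!encloses(Child, Sibling));  // unrelated scopes do not
  return 0;
}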
|
||||
|
||||
} // end llvm namespace
|
||||
|
||||
#endif
|
||||
38
thirdparty/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
//===- llvm/Codegen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This header file pulls in all assembler writer related passes for tools like
|
||||
// llc that need this functionality.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
|
||||
#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
|
||||
|
||||
#include "llvm/CodeGen/GCs.h"
|
||||
#include <cstdlib>
|
||||
|
||||
namespace {
|
||||
struct ForceAsmWriterLinking {
|
||||
ForceAsmWriterLinking() {
|
||||
      // We must reference the plug-ins in such a way that compilers will not
      // delete them as dead code, even with whole-program optimization, yet
      // the reference must effectively be a no-op.  As the compiler isn't
      // smart enough to know that getenv() never returns -1, this will do
      // the job.
|
||||
if (std::getenv("bar") != (char*) -1)
|
||||
return;
|
||||
|
||||
llvm::linkOcamlGCPrinter();
|
||||
llvm::linkErlangGCPrinter();
|
||||
|
||||
}
|
||||
} ForceAsmWriterLinking; // Force link by creating a global definition.
|
||||
}
|
||||
|
||||
#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
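The anonymous struct above is a general idiom for forcing otherwise-unreferenced object files into a link. A self-contained sketch of the same trick outside LLVM (mylib::linkMyComponent is a hypothetical stand-in, defined locally here so the example builds on its own):

#include <cstdio>
#include <cstdlib>

// Stand-in for a component that would normally live in another object file
// and register itself from a static constructor (hypothetical, not LLVM API).
namespace mylib {
  void linkMyComponent() { std::puts("component pulled into the link"); }
}

namespace {
  struct ForceMyComponentLinking {
    ForceMyComponentLinking() {
      // getenv() never returns (char*)-1, so the call below never executes,
      // but the compiler cannot prove that, so the reference survives
      // dead-code elimination and drags the component's object file in.
      if (std::getenv("bar") != (char *)-1)
        return;
      mylib::linkMyComponent();
    }
  } ForceMyComponentLinking; // global instance; constructor runs at startup
}

int main() { return 0; }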
|
||||
54
thirdparty/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
//===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This header file pulls in all codegen related passes for tools like lli and
|
||||
// llc that need this functionality.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
|
||||
#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
|
||||
|
||||
#include "llvm/CodeGen/GCs.h"
|
||||
#include "llvm/CodeGen/Passes.h"
|
||||
#include "llvm/CodeGen/SchedulerRegistry.h"
|
||||
#include "llvm/Target/TargetMachine.h"
|
||||
#include <cstdlib>
|
||||
|
||||
namespace {
|
||||
struct ForceCodegenLinking {
|
||||
ForceCodegenLinking() {
|
||||
      // We must reference the passes in such a way that compilers will not
      // delete them as dead code, even with whole-program optimization, yet
      // the reference must effectively be a no-op.  As the compiler isn't
      // smart enough to know that getenv() never returns -1, this will do
      // the job.
|
||||
if (std::getenv("bar") != (char*) -1)
|
||||
return;
|
||||
|
||||
(void) llvm::createFastRegisterAllocator();
|
||||
(void) llvm::createBasicRegisterAllocator();
|
||||
(void) llvm::createGreedyRegisterAllocator();
|
||||
(void) llvm::createDefaultPBQPRegisterAllocator();
|
||||
|
||||
llvm::linkOcamlGC();
|
||||
llvm::linkErlangGC();
|
||||
llvm::linkShadowStackGC();
|
||||
|
||||
(void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
|
||||
(void) llvm::createSourceListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
|
||||
(void) llvm::createHybridListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
|
||||
(void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default);
|
||||
(void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default);
|
||||
(void) llvm::createVLIWDAGScheduler(NULL, llvm::CodeGenOpt::Default);
|
||||
|
||||
}
|
||||
} ForceCodegenLinking; // Force link by creating a global definition.
|
||||
}
|
||||
|
||||
#endif
|
||||
667
thirdparty/clang/include/llvm/CodeGen/LiveInterval.h
vendored
Normal file
@@ -0,0 +1,667 @@
|
||||
//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the LiveRange and LiveInterval classes.  Given some
// numbering of the machine instructions, an interval [i, j) is said to be a
|
||||
// live interval for register v if there is no instruction with number j' >= j
|
||||
// such that v is live at j' and there is no instruction with number i' < i such
|
||||
// that v is live at i'. In this implementation intervals can have holes,
|
||||
// i.e. an interval might look like [1,20), [50,65), [1000,1001). Each
|
||||
// individual range is represented as an instance of LiveRange, and the whole
|
||||
// interval is represented as an instance of LiveInterval.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
#define LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
|
||||
#include "llvm/ADT/IntEqClasses.h"
|
||||
#include "llvm/CodeGen/SlotIndexes.h"
|
||||
#include "llvm/Support/AlignOf.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include <cassert>
|
||||
#include <climits>
|
||||
|
||||
namespace llvm {
|
||||
class CoalescerPair;
|
||||
class LiveIntervals;
|
||||
class MachineInstr;
|
||||
class MachineRegisterInfo;
|
||||
class TargetRegisterInfo;
|
||||
class raw_ostream;
|
||||
|
||||
/// VNInfo - Value Number Information.
|
||||
/// This class holds information about a machine level value, including
|
||||
/// definition and use points.
|
||||
///
|
||||
class VNInfo {
|
||||
public:
|
||||
typedef BumpPtrAllocator Allocator;
|
||||
|
||||
/// The ID number of this value.
|
||||
unsigned id;
|
||||
|
||||
/// The index of the defining instruction.
|
||||
SlotIndex def;
|
||||
|
||||
/// VNInfo constructor.
|
||||
VNInfo(unsigned i, SlotIndex d)
|
||||
: id(i), def(d)
|
||||
{ }
|
||||
|
||||
  /// VNInfo constructor, copies values from orig, except for the value number.
|
||||
VNInfo(unsigned i, const VNInfo &orig)
|
||||
: id(i), def(orig.def)
|
||||
{ }
|
||||
|
||||
/// Copy from the parameter into this VNInfo.
|
||||
void copyFrom(VNInfo &src) {
|
||||
def = src.def;
|
||||
}
|
||||
|
||||
/// Returns true if this value is defined by a PHI instruction (or was,
|
||||
  /// PHI instructions may have been eliminated).
|
||||
/// PHI-defs begin at a block boundary, all other defs begin at register or
|
||||
/// EC slots.
|
||||
bool isPHIDef() const { return def.isBlock(); }
|
||||
|
||||
/// Returns true if this value is unused.
|
||||
bool isUnused() const { return !def.isValid(); }
|
||||
|
||||
/// Mark this value as unused.
|
||||
void markUnused() { def = SlotIndex(); }
|
||||
};
|
||||
|
||||
/// LiveRange structure - This represents a simple register range in the
|
||||
/// program, with an inclusive start point and an exclusive end point.
|
||||
/// These ranges are rendered as [start,end).
|
||||
struct LiveRange {
|
||||
SlotIndex start; // Start point of the interval (inclusive)
|
||||
SlotIndex end; // End point of the interval (exclusive)
|
||||
VNInfo *valno; // identifier for the value contained in this interval.
|
||||
|
||||
LiveRange() : valno(0) {}
|
||||
|
||||
LiveRange(SlotIndex S, SlotIndex E, VNInfo *V)
|
||||
: start(S), end(E), valno(V) {
|
||||
assert(S < E && "Cannot create empty or backwards range");
|
||||
}
|
||||
|
||||
/// contains - Return true if the index is covered by this range.
|
||||
///
|
||||
bool contains(SlotIndex I) const {
|
||||
return start <= I && I < end;
|
||||
}
|
||||
|
||||
/// containsRange - Return true if the given range, [S, E), is covered by
|
||||
/// this range.
|
||||
bool containsRange(SlotIndex S, SlotIndex E) const {
|
||||
assert((S < E) && "Backwards interval?");
|
||||
return (start <= S && S < end) && (start < E && E <= end);
|
||||
}
|
||||
|
||||
bool operator<(const LiveRange &LR) const {
|
||||
return start < LR.start || (start == LR.start && end < LR.end);
|
||||
}
|
||||
bool operator==(const LiveRange &LR) const {
|
||||
return start == LR.start && end == LR.end;
|
||||
}
|
||||
|
||||
void dump() const;
|
||||
void print(raw_ostream &os) const;
|
||||
};
|
||||
|
||||
template <> struct isPodLike<LiveRange> { static const bool value = true; };
|
||||
|
||||
raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);
|
||||
|
||||
|
||||
inline bool operator<(SlotIndex V, const LiveRange &LR) {
|
||||
return V < LR.start;
|
||||
}
|
||||
|
||||
inline bool operator<(const LiveRange &LR, SlotIndex V) {
|
||||
return LR.start < V;
|
||||
}
|
||||
|
||||
/// LiveInterval - This class represents some number of live ranges for a
|
||||
/// register or value. This class also contains a bit of register allocator
|
||||
/// state.
|
||||
class LiveInterval {
|
||||
public:
|
||||
|
||||
typedef SmallVector<LiveRange,4> Ranges;
|
||||
typedef SmallVector<VNInfo*,4> VNInfoList;
|
||||
|
||||
const unsigned reg; // the register or stack slot of this interval.
|
||||
float weight; // weight of this interval
|
||||
Ranges ranges; // the ranges in which this register is live
|
||||
VNInfoList valnos; // value#'s
|
||||
|
||||
struct InstrSlots {
|
||||
enum {
|
||||
LOAD = 0,
|
||||
USE = 1,
|
||||
DEF = 2,
|
||||
STORE = 3,
|
||||
NUM = 4
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
LiveInterval(unsigned Reg, float Weight)
|
||||
: reg(Reg), weight(Weight) {}
|
||||
|
||||
typedef Ranges::iterator iterator;
|
||||
iterator begin() { return ranges.begin(); }
|
||||
iterator end() { return ranges.end(); }
|
||||
|
||||
typedef Ranges::const_iterator const_iterator;
|
||||
const_iterator begin() const { return ranges.begin(); }
|
||||
const_iterator end() const { return ranges.end(); }
|
||||
|
||||
typedef VNInfoList::iterator vni_iterator;
|
||||
vni_iterator vni_begin() { return valnos.begin(); }
|
||||
vni_iterator vni_end() { return valnos.end(); }
|
||||
|
||||
typedef VNInfoList::const_iterator const_vni_iterator;
|
||||
const_vni_iterator vni_begin() const { return valnos.begin(); }
|
||||
const_vni_iterator vni_end() const { return valnos.end(); }
|
||||
|
||||
/// advanceTo - Advance the specified iterator to point to the LiveRange
|
||||
/// containing the specified position, or end() if the position is past the
|
||||
/// end of the interval. If no LiveRange contains this position, but the
|
||||
/// position is in a hole, this method returns an iterator pointing to the
|
||||
/// LiveRange immediately after the hole.
|
||||
iterator advanceTo(iterator I, SlotIndex Pos) {
|
||||
assert(I != end());
|
||||
if (Pos >= endIndex())
|
||||
return end();
|
||||
while (I->end <= Pos) ++I;
|
||||
return I;
|
||||
}
|
||||
|
||||
/// find - Return an iterator pointing to the first range that ends after
|
||||
/// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
|
||||
/// when searching large intervals.
|
||||
///
|
||||
/// If Pos is contained in a LiveRange, that range is returned.
|
||||
/// If Pos is in a hole, the following LiveRange is returned.
|
||||
/// If Pos is beyond endIndex, end() is returned.
|
||||
iterator find(SlotIndex Pos);
|
||||
|
||||
const_iterator find(SlotIndex Pos) const {
|
||||
return const_cast<LiveInterval*>(this)->find(Pos);
|
||||
}
|
||||
|
||||
void clear() {
|
||||
valnos.clear();
|
||||
ranges.clear();
|
||||
}
|
||||
|
||||
bool hasAtLeastOneValue() const { return !valnos.empty(); }
|
||||
|
||||
bool containsOneValue() const { return valnos.size() == 1; }
|
||||
|
||||
unsigned getNumValNums() const { return (unsigned)valnos.size(); }
|
||||
|
||||
/// getValNumInfo - Returns pointer to the specified val#.
|
||||
///
|
||||
inline VNInfo *getValNumInfo(unsigned ValNo) {
|
||||
return valnos[ValNo];
|
||||
}
|
||||
inline const VNInfo *getValNumInfo(unsigned ValNo) const {
|
||||
return valnos[ValNo];
|
||||
}
|
||||
|
||||
/// containsValue - Returns true if VNI belongs to this interval.
|
||||
bool containsValue(const VNInfo *VNI) const {
|
||||
return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id);
|
||||
}
|
||||
|
||||
  /// getNextValue - Create a new value number and return it.  def is the
  /// slot index of the instruction that defines the new value number.
|
||||
VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
|
||||
VNInfo *VNI =
|
||||
new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
|
||||
valnos.push_back(VNI);
|
||||
return VNI;
|
||||
}
|
||||
|
||||
/// createDeadDef - Make sure the interval has a value defined at Def.
|
||||
/// If one already exists, return it. Otherwise allocate a new value and
|
||||
/// add liveness for a dead def.
|
||||
VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
|
||||
|
||||
/// Create a copy of the given value. The new value will be identical except
|
||||
/// for the Value number.
|
||||
VNInfo *createValueCopy(const VNInfo *orig,
|
||||
VNInfo::Allocator &VNInfoAllocator) {
|
||||
VNInfo *VNI =
|
||||
new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
|
||||
valnos.push_back(VNI);
|
||||
return VNI;
|
||||
}
|
||||
|
||||
/// RenumberValues - Renumber all values in order of appearance and remove
|
||||
/// unused values.
|
||||
void RenumberValues(LiveIntervals &lis);
|
||||
|
||||
  /// MergeValueNumberInto - This method is called when two value numbers
  /// are found to be equivalent.  This eliminates V1, replacing all
  /// LiveRanges with the V1 value number with the V2 value number.  This can
  /// cause merging of V1/V2 value numbers and compaction of the value space.
|
||||
VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
|
||||
|
||||
  /// MergeRangesInAsValue - Merge all of the live ranges in RHS into this
  /// live interval as the specified value number.  The LiveRanges in RHS are
  /// allowed to overlap with LiveRanges in the current interval; the value
  /// numbers of the overlapped live ranges are replaced with the specified
  /// value number.
|
||||
void MergeRangesInAsValue(const LiveInterval &RHS, VNInfo *LHSValNo);
|
||||
|
||||
/// MergeValueInAsValue - Merge all of the live ranges of a specific val#
|
||||
/// in RHS into this live interval as the specified value number.
|
||||
/// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
|
||||
/// current interval, but only if the overlapping LiveRanges have the
|
||||
/// specified value number.
|
||||
void MergeValueInAsValue(const LiveInterval &RHS,
|
||||
const VNInfo *RHSValNo, VNInfo *LHSValNo);
|
||||
|
||||
bool empty() const { return ranges.empty(); }
|
||||
|
||||
/// beginIndex - Return the lowest numbered slot covered by interval.
|
||||
SlotIndex beginIndex() const {
|
||||
assert(!empty() && "Call to beginIndex() on empty interval.");
|
||||
return ranges.front().start;
|
||||
}
|
||||
|
||||
  /// endIndex - Return the maximum point of the whole interval, exclusive.
|
||||
SlotIndex endIndex() const {
|
||||
assert(!empty() && "Call to endIndex() on empty interval.");
|
||||
return ranges.back().end;
|
||||
}
|
||||
|
||||
bool expiredAt(SlotIndex index) const {
|
||||
return index >= endIndex();
|
||||
}
|
||||
|
||||
bool liveAt(SlotIndex index) const {
|
||||
const_iterator r = find(index);
|
||||
return r != end() && r->start <= index;
|
||||
}
|
||||
|
||||
/// killedAt - Return true if a live range ends at index. Note that the kill
|
||||
/// point is not contained in the half-open live range. It is usually the
|
||||
/// getDefIndex() slot following its last use.
|
||||
bool killedAt(SlotIndex index) const {
|
||||
const_iterator r = find(index.getRegSlot(true));
|
||||
return r != end() && r->end == index;
|
||||
}
|
||||
|
||||
/// getLiveRangeContaining - Return the live range that contains the
|
||||
/// specified index, or null if there is none.
|
||||
const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {
|
||||
const_iterator I = FindLiveRangeContaining(Idx);
|
||||
return I == end() ? 0 : &*I;
|
||||
}
|
||||
|
||||
/// getLiveRangeContaining - Return the live range that contains the
|
||||
/// specified index, or null if there is none.
|
||||
LiveRange *getLiveRangeContaining(SlotIndex Idx) {
|
||||
iterator I = FindLiveRangeContaining(Idx);
|
||||
return I == end() ? 0 : &*I;
|
||||
}
|
||||
|
||||
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
|
||||
VNInfo *getVNInfoAt(SlotIndex Idx) const {
|
||||
const_iterator I = FindLiveRangeContaining(Idx);
|
||||
return I == end() ? 0 : I->valno;
|
||||
}
|
||||
|
||||
/// getVNInfoBefore - Return the VNInfo that is live up to but not
|
||||
  /// necessarily including Idx, or NULL. Use this to find the reaching def
|
||||
/// used by an instruction at this SlotIndex position.
|
||||
VNInfo *getVNInfoBefore(SlotIndex Idx) const {
|
||||
const_iterator I = FindLiveRangeContaining(Idx.getPrevSlot());
|
||||
return I == end() ? 0 : I->valno;
|
||||
}
|
||||
|
||||
/// FindLiveRangeContaining - Return an iterator to the live range that
|
||||
/// contains the specified index, or end() if there is none.
|
||||
iterator FindLiveRangeContaining(SlotIndex Idx) {
|
||||
iterator I = find(Idx);
|
||||
return I != end() && I->start <= Idx ? I : end();
|
||||
}
|
||||
|
||||
const_iterator FindLiveRangeContaining(SlotIndex Idx) const {
|
||||
const_iterator I = find(Idx);
|
||||
return I != end() && I->start <= Idx ? I : end();
|
||||
}
|
||||
|
||||
/// overlaps - Return true if the intersection of the two live intervals is
|
||||
/// not empty.
|
||||
bool overlaps(const LiveInterval& other) const {
|
||||
if (other.empty())
|
||||
return false;
|
||||
return overlapsFrom(other, other.begin());
|
||||
}
|
||||
|
||||
/// overlaps - Return true if the two intervals have overlapping segments
|
||||
/// that are not coalescable according to CP.
|
||||
///
|
||||
/// Overlapping segments where one interval is defined by a coalescable
|
||||
/// copy are allowed.
|
||||
bool overlaps(const LiveInterval &Other, const CoalescerPair &CP,
|
||||
const SlotIndexes&) const;
|
||||
|
||||
/// overlaps - Return true if the live interval overlaps a range specified
|
||||
/// by [Start, End).
|
||||
bool overlaps(SlotIndex Start, SlotIndex End) const;
|
||||
|
||||
/// overlapsFrom - Return true if the intersection of the two live intervals
|
||||
/// is not empty. The specified iterator is a hint that we can begin
|
||||
/// scanning the Other interval starting at I.
|
||||
bool overlapsFrom(const LiveInterval& other, const_iterator I) const;
|
||||
|
||||
/// addRange - Add the specified LiveRange to this interval, merging
|
||||
/// intervals as appropriate. This returns an iterator to the inserted live
|
||||
  /// range (which may have grown since it was inserted).
|
||||
iterator addRange(LiveRange LR) {
|
||||
return addRangeFrom(LR, ranges.begin());
|
||||
}
|
||||
|
||||
/// extendInBlock - If this interval is live before Kill in the basic block
|
||||
/// that starts at StartIdx, extend it to be live up to Kill, and return
|
||||
/// the value. If there is no live range before Kill, return NULL.
|
||||
VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Kill);
|
||||
|
||||
/// join - Join two live intervals (this, and other) together. This applies
|
||||
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
|
||||
/// the intervals are not joinable, this aborts.
|
||||
void join(LiveInterval &Other,
|
||||
const int *ValNoAssignments,
|
||||
const int *RHSValNoAssignments,
|
||||
SmallVector<VNInfo*, 16> &NewVNInfo,
|
||||
MachineRegisterInfo *MRI);
|
||||
|
||||
  /// isInOneLiveRange - Return true if the range specified is entirely within
  /// a single LiveRange of the live interval.
|
||||
bool isInOneLiveRange(SlotIndex Start, SlotIndex End) const {
|
||||
const_iterator r = find(Start);
|
||||
return r != end() && r->containsRange(Start, End);
|
||||
}
|
||||
|
||||
/// removeRange - Remove the specified range from this interval. Note that
|
||||
/// the range must be a single LiveRange in its entirety.
|
||||
void removeRange(SlotIndex Start, SlotIndex End,
|
||||
bool RemoveDeadValNo = false);
|
||||
|
||||
void removeRange(LiveRange LR, bool RemoveDeadValNo = false) {
|
||||
removeRange(LR.start, LR.end, RemoveDeadValNo);
|
||||
}
|
||||
|
||||
/// removeValNo - Remove all the ranges defined by the specified value#.
|
||||
/// Also remove the value# from value# list.
|
||||
void removeValNo(VNInfo *ValNo);
|
||||
|
||||
/// getSize - Returns the sum of sizes of all the LiveRange's.
|
||||
///
|
||||
unsigned getSize() const;
|
||||
|
||||
/// Returns true if the live interval is zero length, i.e. no live ranges
|
||||
/// span instructions. It doesn't pay to spill such an interval.
|
||||
bool isZeroLength(SlotIndexes *Indexes) const {
|
||||
for (const_iterator i = begin(), e = end(); i != e; ++i)
|
||||
if (Indexes->getNextNonNullIndex(i->start).getBaseIndex() <
|
||||
i->end.getBaseIndex())
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/// isSpillable - Can this interval be spilled?
|
||||
bool isSpillable() const {
|
||||
return weight != HUGE_VALF;
|
||||
}
|
||||
|
||||
/// markNotSpillable - Mark interval as not spillable
|
||||
void markNotSpillable() {
|
||||
weight = HUGE_VALF;
|
||||
}
|
||||
|
||||
bool operator<(const LiveInterval& other) const {
|
||||
const SlotIndex &thisIndex = beginIndex();
|
||||
const SlotIndex &otherIndex = other.beginIndex();
|
||||
return (thisIndex < otherIndex ||
|
||||
(thisIndex == otherIndex && reg < other.reg));
|
||||
}
|
||||
|
||||
void print(raw_ostream &OS) const;
|
||||
void dump() const;
|
||||
|
||||
/// \brief Walk the interval and assert if any invariants fail to hold.
|
||||
///
|
||||
/// Note that this is a no-op when asserts are disabled.
|
||||
#ifdef NDEBUG
|
||||
void verify() const {}
|
||||
#else
|
||||
void verify() const;
|
||||
#endif
|
||||
|
||||
private:
|
||||
|
||||
Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
|
||||
void extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd);
|
||||
Ranges::iterator extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStr);
|
||||
void markValNoForDeletion(VNInfo *V);
|
||||
|
||||
LiveInterval& operator=(const LiveInterval& rhs) LLVM_DELETED_FUNCTION;
|
||||
|
||||
};
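As a usage illustration (not part of the original header): given a LiveInterval obtained elsewhere, for example from LiveIntervals::getInterval() inside a pass, the accessors above can summarize it. A hedged sketch:

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Sketch: print a short summary of an existing interval.  LI is assumed to
// come from the register allocator or LiveIntervals::getInterval().
static void summarize(const LiveInterval &LI, raw_ostream &OS) {
  if (LI.empty()) {
    OS << "reg " << LI.reg << ": empty interval\n";
    return;
  }
  unsigned NumSegs = 0;
  for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I != E; ++I)
    ++NumSegs;                       // holes show up as gaps between segments
  OS << "reg " << LI.reg << ": " << NumSegs << " segment(s), "
     << LI.getNumValNums() << " value number(s)"
     << (LI.isSpillable() ? "" : ", not spillable") << " -- ";
  LI.print(OS);                      // prints the individual [start,end) ranges
  OS << "\n";
}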
|
||||
|
||||
inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) {
|
||||
LI.print(OS);
|
||||
return OS;
|
||||
}
|
||||
|
||||
/// Helper class for performant LiveInterval bulk updates.
|
||||
///
|
||||
/// Calling LiveInterval::addRange() repeatedly can be expensive on large
|
||||
/// live ranges because segments after the insertion point may need to be
|
||||
/// shifted. The LiveRangeUpdater class can defer the shifting when adding
|
||||
/// many segments in order.
|
||||
///
|
||||
/// The LiveInterval will be in an invalid state until flush() is called.
|
||||
class LiveRangeUpdater {
|
||||
LiveInterval *LI;
|
||||
SlotIndex LastStart;
|
||||
LiveInterval::iterator WriteI;
|
||||
LiveInterval::iterator ReadI;
|
||||
SmallVector<LiveRange, 16> Spills;
|
||||
void mergeSpills();
|
||||
|
||||
public:
|
||||
/// Create a LiveRangeUpdater for adding segments to LI.
|
||||
/// LI will temporarily be in an invalid state until flush() is called.
|
||||
LiveRangeUpdater(LiveInterval *li = 0) : LI(li) {}
|
||||
|
||||
~LiveRangeUpdater() { flush(); }
|
||||
|
||||
/// Add a segment to LI and coalesce when possible, just like LI.addRange().
|
||||
/// Segments should be added in increasing start order for best performance.
|
||||
void add(LiveRange);
|
||||
|
||||
void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) {
|
||||
add(LiveRange(Start, End, VNI));
|
||||
}
|
||||
|
||||
/// Return true if the LI is currently in an invalid state, and flush()
|
||||
/// needs to be called.
|
||||
bool isDirty() const { return LastStart.isValid(); }
|
||||
|
||||
/// Flush the updater state to LI so it is valid and contains all added
|
||||
/// segments.
|
||||
void flush();
|
||||
|
||||
/// Select a different destination live range.
|
||||
void setDest(LiveInterval *li) {
|
||||
if (LI != li && isDirty())
|
||||
flush();
|
||||
LI = li;
|
||||
}
|
||||
|
||||
/// Get the current destination live range.
|
||||
LiveInterval *getDest() const { return LI; }
|
||||
|
||||
void dump() const;
|
||||
void print(raw_ostream&) const;
|
||||
};
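A usage sketch for the updater (not part of the original header): bulk-copying another interval's segments under one value number, using only the methods declared above. Dst, Src and DstValNo are assumed to already exist, e.g. inside the coalescer.

#include "llvm/CodeGen/LiveInterval.h"

using namespace llvm;

// Sketch: add Src's segments to Dst under a single value number, following
// the add-in-increasing-start-order-then-flush pattern described above.
static void copySegments(LiveInterval &Dst, const LiveInterval &Src,
                         VNInfo *DstValNo) {
  LiveRangeUpdater Updater(&Dst);    // Dst is in an invalid state until flush()
  for (LiveInterval::const_iterator I = Src.begin(), E = Src.end(); I != E; ++I)
    Updater.add(I->start, I->end, DstValNo);
  Updater.flush();                   // ~LiveRangeUpdater() would also flush
}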
|
||||
|
||||
inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) {
|
||||
X.print(OS);
|
||||
return OS;
|
||||
}
|
||||
|
||||
/// LiveRangeQuery - Query information about a live range around a given
|
||||
/// instruction. This class hides the implementation details of live ranges,
|
||||
/// and it should be used as the primary interface for examining live ranges
|
||||
/// around instructions.
|
||||
///
|
||||
class LiveRangeQuery {
|
||||
VNInfo *EarlyVal;
|
||||
VNInfo *LateVal;
|
||||
SlotIndex EndPoint;
|
||||
bool Kill;
|
||||
|
||||
public:
|
||||
/// Create a LiveRangeQuery for the given live range and instruction index.
|
||||
/// The sub-instruction slot of Idx doesn't matter, only the instruction it
|
||||
/// refers to is considered.
|
||||
LiveRangeQuery(const LiveInterval &LI, SlotIndex Idx)
|
||||
: EarlyVal(0), LateVal(0), Kill(false) {
|
||||
// Find the segment that enters the instruction.
|
||||
LiveInterval::const_iterator I = LI.find(Idx.getBaseIndex());
|
||||
LiveInterval::const_iterator E = LI.end();
|
||||
if (I == E)
|
||||
return;
|
||||
// Is this an instruction live-in segment?
|
||||
// If Idx is the start index of a basic block, include live-in segments
|
||||
// that start at Idx.getBaseIndex().
|
||||
if (I->start <= Idx.getBaseIndex()) {
|
||||
EarlyVal = I->valno;
|
||||
EndPoint = I->end;
|
||||
// Move to the potentially live-out segment.
|
||||
if (SlotIndex::isSameInstr(Idx, I->end)) {
|
||||
Kill = true;
|
||||
if (++I == E)
|
||||
return;
|
||||
}
|
||||
// Special case: A PHIDef value can have its def in the middle of a
|
||||
// segment if the value happens to be live out of the layout
|
||||
// predecessor.
|
||||
// Such a value is not live-in.
|
||||
if (EarlyVal->def == Idx.getBaseIndex())
|
||||
EarlyVal = 0;
|
||||
}
|
||||
// I now points to the segment that may be live-through, or defined by
|
||||
// this instr. Ignore segments starting after the current instr.
|
||||
if (SlotIndex::isEarlierInstr(Idx, I->start))
|
||||
return;
|
||||
LateVal = I->valno;
|
||||
EndPoint = I->end;
|
||||
}
|
||||
|
||||
/// Return the value that is live-in to the instruction. This is the value
|
||||
/// that will be read by the instruction's use operands. Return NULL if no
|
||||
/// value is live-in.
|
||||
VNInfo *valueIn() const {
|
||||
return EarlyVal;
|
||||
}
|
||||
|
||||
/// Return true if the live-in value is killed by this instruction. This
|
||||
/// means that either the live range ends at the instruction, or it changes
|
||||
/// value.
|
||||
bool isKill() const {
|
||||
return Kill;
|
||||
}
|
||||
|
||||
/// Return true if this instruction has a dead def.
|
||||
bool isDeadDef() const {
|
||||
return EndPoint.isDead();
|
||||
}
|
||||
|
||||
/// Return the value leaving the instruction, if any. This can be a
|
||||
/// live-through value, or a live def. A dead def returns NULL.
|
||||
VNInfo *valueOut() const {
|
||||
return isDeadDef() ? 0 : LateVal;
|
||||
}
|
||||
|
||||
/// Return the value defined by this instruction, if any. This includes
|
||||
/// dead defs, it is the value created by the instruction's def operands.
|
||||
VNInfo *valueDefined() const {
|
||||
return EarlyVal == LateVal ? 0 : LateVal;
|
||||
}
|
||||
|
||||
/// Return the end point of the last live range segment to interact with
|
||||
/// the instruction, if any.
|
||||
///
|
||||
/// The end point is an invalid SlotIndex only if the live range doesn't
|
||||
/// intersect the instruction at all.
|
||||
///
|
||||
/// The end point may be at or past the end of the instruction's basic
|
||||
/// block. That means the value was live out of the block.
|
||||
SlotIndex endPoint() const {
|
||||
return EndPoint;
|
||||
}
|
||||
};
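A usage sketch for LiveRangeQuery (not part of the original header): LI and Idx are assumed to come from LiveIntervals, e.g. getInterval() and getInstructionIndex(), inside a pass.

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Sketch: describe how the instruction at Idx interacts with LI.
static void classify(const LiveInterval &LI, SlotIndex Idx, raw_ostream &OS) {
  LiveRangeQuery LRQ(LI, Idx);
  if (LRQ.valueIn())
    OS << (LRQ.isKill() ? "reads and kills the incoming value\n"
                        : "reads the incoming value\n");
  if (LRQ.valueDefined())
    OS << (LRQ.isDeadDef() ? "defines a dead value\n"
                           : "defines a value that lives on\n");
  if (!LRQ.valueIn() && !LRQ.valueDefined())
    OS << "no interaction with this interval\n";
}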
|
||||
|
||||
/// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
|
||||
/// LiveInterval into equivalence classes of connected components. A
|
||||
/// LiveInterval that has multiple connected components can be broken into
|
||||
/// multiple LiveIntervals.
|
||||
///
|
||||
/// Given a LiveInterval that may have multiple connected components, run:
|
||||
///
|
||||
/// unsigned numComps = ConEQ.Classify(LI);
|
||||
/// if (numComps > 1) {
|
||||
/// // allocate numComps-1 new LiveIntervals into LIS[1..]
|
||||
/// ConEQ.Distribute(LIS);
|
||||
/// }
|
||||
|
||||
class ConnectedVNInfoEqClasses {
|
||||
LiveIntervals &LIS;
|
||||
IntEqClasses EqClass;
|
||||
|
||||
// Note that values a and b are connected.
|
||||
void Connect(unsigned a, unsigned b);
|
||||
|
||||
unsigned Renumber();
|
||||
|
||||
public:
|
||||
explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {}
|
||||
|
||||
/// Classify - Classify the values in LI into connected components.
|
||||
/// Return the number of connected components.
|
||||
unsigned Classify(const LiveInterval *LI);
|
||||
|
||||
/// getEqClass - Classify creates equivalence classes numbered 0..N. Return
|
||||
  /// the equivalence class assigned to the VNI.
|
||||
unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; }
|
||||
|
||||
/// Distribute - Distribute values in LIV[0] into a separate LiveInterval
|
||||
/// for each connected component. LIV must have a LiveInterval for each
|
||||
  /// connected component. The LiveIntervals in LIV[1..] must be empty.
|
||||
/// Instructions using LIV[0] are rewritten.
|
||||
void Distribute(LiveInterval *LIV[], MachineRegisterInfo &MRI);
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
||||
394
thirdparty/clang/include/llvm/CodeGen/LiveIntervalAnalysis.h
vendored
Normal file
@@ -0,0 +1,394 @@
|
||||
//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the LiveInterval analysis pass.  Given some numbering of
// the machine instructions (in this implementation, depth-first order) an
|
||||
// interval [i, j) is said to be a live interval for register v if there is no
|
||||
// instruction with number j' > j such that v is live at j' and there is no
|
||||
// instruction with number i' < i such that v is live at i'. In this
|
||||
// implementation intervals can have holes, i.e. an interval might look like
|
||||
// [1,20), [50,65), [1000,1001).
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
|
||||
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
|
||||
|
||||
#include "llvm/ADT/IndexedMap.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/CodeGen/LiveInterval.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/SlotIndexes.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
#include <cmath>
|
||||
#include <iterator>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class AliasAnalysis;
|
||||
class BitVector;
|
||||
class LiveRangeCalc;
|
||||
class LiveVariables;
|
||||
class MachineDominatorTree;
|
||||
class MachineLoopInfo;
|
||||
class TargetRegisterInfo;
|
||||
class MachineRegisterInfo;
|
||||
class TargetInstrInfo;
|
||||
class TargetRegisterClass;
|
||||
class VirtRegMap;
|
||||
|
||||
class LiveIntervals : public MachineFunctionPass {
|
||||
MachineFunction* MF;
|
||||
MachineRegisterInfo* MRI;
|
||||
const TargetMachine* TM;
|
||||
const TargetRegisterInfo* TRI;
|
||||
const TargetInstrInfo* TII;
|
||||
AliasAnalysis *AA;
|
||||
SlotIndexes* Indexes;
|
||||
MachineDominatorTree *DomTree;
|
||||
LiveRangeCalc *LRCalc;
|
||||
|
||||
/// Special pool allocator for VNInfo's (LiveInterval val#).
|
||||
///
|
||||
VNInfo::Allocator VNInfoAllocator;
|
||||
|
||||
/// Live interval pointers for all the virtual registers.
|
||||
IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
|
||||
|
||||
/// RegMaskSlots - Sorted list of instructions with register mask operands.
|
||||
/// Always use the 'r' slot, RegMasks are normal clobbers, not early
|
||||
/// clobbers.
|
||||
SmallVector<SlotIndex, 8> RegMaskSlots;
|
||||
|
||||
/// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a
|
||||
/// pointer to the corresponding register mask. This pointer can be
|
||||
/// recomputed as:
|
||||
///
|
||||
/// MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
|
||||
/// unsigned OpNum = findRegMaskOperand(MI);
|
||||
/// RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
|
||||
///
|
||||
/// This is kept in a separate vector partly because some standard
|
||||
/// libraries don't support lower_bound() with mixed objects, partly to
|
||||
/// improve locality when searching in RegMaskSlots.
|
||||
/// Also see the comment in LiveInterval::find().
|
||||
SmallVector<const uint32_t*, 8> RegMaskBits;
|
||||
|
||||
/// For each basic block number, keep (begin, size) pairs indexing into the
|
||||
/// RegMaskSlots and RegMaskBits arrays.
|
||||
/// Note that basic block numbers may not be layout contiguous, that's why
|
||||
/// we can't just keep track of the first register mask in each basic
|
||||
/// block.
|
||||
SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
|
||||
|
||||
/// RegUnitIntervals - Keep a live interval for each register unit as a way
|
||||
/// of tracking fixed physreg interference.
|
||||
SmallVector<LiveInterval*, 0> RegUnitIntervals;
|
||||
|
||||
public:
|
||||
static char ID; // Pass identification, replacement for typeid
|
||||
LiveIntervals();
|
||||
virtual ~LiveIntervals();
|
||||
|
||||
// Calculate the spill weight to assign to a single instruction.
|
||||
static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);
|
||||
|
||||
LiveInterval &getInterval(unsigned Reg) {
|
||||
LiveInterval *LI = VirtRegIntervals[Reg];
|
||||
assert(LI && "Interval does not exist for virtual register");
|
||||
return *LI;
|
||||
}
|
||||
|
||||
const LiveInterval &getInterval(unsigned Reg) const {
|
||||
return const_cast<LiveIntervals*>(this)->getInterval(Reg);
|
||||
}
|
||||
|
||||
bool hasInterval(unsigned Reg) const {
|
||||
return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
|
||||
}
|
||||
|
||||
// Interval creation.
|
||||
LiveInterval &getOrCreateInterval(unsigned Reg) {
|
||||
if (!hasInterval(Reg)) {
|
||||
VirtRegIntervals.grow(Reg);
|
||||
VirtRegIntervals[Reg] = createInterval(Reg);
|
||||
}
|
||||
return getInterval(Reg);
|
||||
}
|
||||
|
||||
// Interval removal.
|
||||
void removeInterval(unsigned Reg) {
|
||||
delete VirtRegIntervals[Reg];
|
||||
VirtRegIntervals[Reg] = 0;
|
||||
}
|
||||
|
||||
/// addLiveRangeToEndOfBlock - Given a register and an instruction,
|
||||
/// adds a live range from that instruction to the end of its MBB.
|
||||
LiveRange addLiveRangeToEndOfBlock(unsigned reg,
|
||||
MachineInstr* startInst);
|
||||
|
||||
/// shrinkToUses - After removing some uses of a register, shrink its live
|
||||
/// range to just the remaining uses. This method does not compute reaching
|
||||
/// defs for new uses, and it doesn't remove dead defs.
|
||||
/// Dead PHIDef values are marked as unused.
|
||||
/// New dead machine instructions are added to the dead vector.
|
||||
/// Return true if the interval may have been separated into multiple
|
||||
/// connected components.
|
||||
bool shrinkToUses(LiveInterval *li,
|
||||
SmallVectorImpl<MachineInstr*> *dead = 0);
|
||||
|
||||
/// extendToIndices - Extend the live range of LI to reach all points in
|
||||
/// Indices. The points in the Indices array must be jointly dominated by
|
||||
/// existing defs in LI. PHI-defs are added as needed to maintain SSA form.
|
||||
///
|
||||
/// If a SlotIndex in Indices is the end index of a basic block, LI will be
|
||||
/// extended to be live out of the basic block.
|
||||
///
|
||||
/// See also LiveRangeCalc::extend().
|
||||
void extendToIndices(LiveInterval *LI, ArrayRef<SlotIndex> Indices);
|
||||
|
||||
/// pruneValue - If an LI value is live at Kill, prune its live range by
|
||||
/// removing any liveness reachable from Kill. Add live range end points to
|
||||
/// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
|
||||
/// value's live range.
|
||||
///
|
||||
/// Calling pruneValue() and extendToIndices() can be used to reconstruct
|
||||
/// SSA form after adding defs to a virtual register.
|
||||
void pruneValue(LiveInterval *LI, SlotIndex Kill,
|
||||
SmallVectorImpl<SlotIndex> *EndPoints);
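A sketch of the prune-then-extend pattern mentioned above (not part of the original header). LIS, LI and the SlotIndex of the newly inserted def are assumed to exist inside a pass that preserves LiveIntervals; creating the new def's value number is elided.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"

using namespace llvm;

// Sketch: after inserting a new def of LI's register at NewDefIdx, clip the
// old liveness there and re-extend the range to the recorded end points.
static void reflowLiveness(LiveIntervals &LIS, LiveInterval &LI,
                           SlotIndex NewDefIdx) {
  SmallVector<SlotIndex, 4> EndPoints;
  LIS.pruneValue(&LI, NewDefIdx, &EndPoints); // drop liveness reachable from NewDefIdx
  // ... define the new value at NewDefIdx here (elided) ...
  LIS.extendToIndices(&LI, EndPoints);        // rebuild SSA-form liveness
}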
|
||||
|
||||
SlotIndexes *getSlotIndexes() const {
|
||||
return Indexes;
|
||||
}
|
||||
|
||||
AliasAnalysis *getAliasAnalysis() const {
|
||||
return AA;
|
||||
}
|
||||
|
||||
/// isNotInMIMap - returns true if the specified machine instr has been
|
||||
/// removed or was never entered in the map.
|
||||
bool isNotInMIMap(const MachineInstr* Instr) const {
|
||||
return !Indexes->hasIndex(Instr);
|
||||
}
|
||||
|
||||
/// Returns the base index of the given instruction.
|
||||
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
|
||||
return Indexes->getInstructionIndex(instr);
|
||||
}
|
||||
|
||||
/// Returns the instruction associated with the given index.
|
||||
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
|
||||
return Indexes->getInstructionFromIndex(index);
|
||||
}
|
||||
|
||||
/// Return the first index in the given basic block.
|
||||
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
|
||||
return Indexes->getMBBStartIdx(mbb);
|
||||
}
|
||||
|
||||
/// Return the last index in the given basic block.
|
||||
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
|
||||
return Indexes->getMBBEndIdx(mbb);
|
||||
}
|
||||
|
||||
bool isLiveInToMBB(const LiveInterval &li,
|
||||
const MachineBasicBlock *mbb) const {
|
||||
return li.liveAt(getMBBStartIdx(mbb));
|
||||
}
|
||||
|
||||
bool isLiveOutOfMBB(const LiveInterval &li,
|
||||
const MachineBasicBlock *mbb) const {
|
||||
return li.liveAt(getMBBEndIdx(mbb).getPrevSlot());
|
||||
}
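A usage sketch for the per-block queries above (not part of the original header): count the blocks a virtual register is live into and out of. LIS, MF and Reg are assumed to exist inside a pass.

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"

using namespace llvm;

// Sketch: number of blocks Reg is live both into and out of.
static unsigned countLiveThroughBlocks(LiveIntervals &LIS,
                                       const MachineFunction &MF,
                                       unsigned Reg) {
  if (!LIS.hasInterval(Reg))
    return 0;                        // no interval was ever computed for Reg
  const LiveInterval &LI = LIS.getInterval(Reg);
  unsigned N = 0;
  for (MachineFunction::const_iterator MBB = MF.begin(), E = MF.end();
       MBB != E; ++MBB)
    if (LIS.isLiveInToMBB(LI, &*MBB) && LIS.isLiveOutOfMBB(LI, &*MBB))
      ++N;
  return N;
}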
|
||||
|
||||
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
|
||||
return Indexes->getMBBFromIndex(index);
|
||||
}
|
||||
|
||||
void insertMBBInMaps(MachineBasicBlock *MBB) {
|
||||
Indexes->insertMBBInMaps(MBB);
|
||||
assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
|
||||
"Blocks must be added in order.");
|
||||
RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
|
||||
}
|
||||
|
||||
SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
|
||||
return Indexes->insertMachineInstrInMaps(MI);
|
||||
}
|
||||
|
||||
void RemoveMachineInstrFromMaps(MachineInstr *MI) {
|
||||
Indexes->removeMachineInstrFromMaps(MI);
|
||||
}
|
||||
|
||||
void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
|
||||
Indexes->replaceMachineInstrInMaps(MI, NewMI);
|
||||
}
|
||||
|
||||
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
|
||||
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
|
||||
return Indexes->findLiveInMBBs(Start, End, MBBs);
|
||||
}
|
||||
|
||||
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
virtual void releaseMemory();
|
||||
|
||||
/// runOnMachineFunction - pass entry point
|
||||
virtual bool runOnMachineFunction(MachineFunction&);
|
||||
|
||||
/// print - Implement the dump method.
|
||||
virtual void print(raw_ostream &O, const Module* = 0) const;
|
||||
|
||||
/// intervalIsInOneMBB - If LI is confined to a single basic block, return
|
||||
/// a pointer to that block. If LI is live in to or out of any block,
|
||||
/// return NULL.
|
||||
MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
|
||||
|
||||
/// Returns true if VNI is killed by any PHI-def values in LI.
|
||||
/// This may conservatively return true to avoid expensive computations.
|
||||
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
|
||||
|
||||
/// addKillFlags - Add kill flags to any instruction that kills a virtual
|
||||
/// register.
|
||||
void addKillFlags(const VirtRegMap*);
|
||||
|
||||
/// handleMove - call this method to notify LiveIntervals that
|
||||
/// instruction 'mi' has been moved within a basic block. This will update
|
||||
/// the live intervals for all operands of mi. Moves between basic blocks
|
||||
/// are not supported.
|
||||
///
|
||||
/// \param UpdateFlags Update live intervals for nonallocatable physregs.
|
||||
void handleMove(MachineInstr* MI, bool UpdateFlags = false);
|
||||
|
||||
/// moveIntoBundle - Update intervals for operands of MI so that they
|
||||
/// begin/end on the SlotIndex for BundleStart.
|
||||
///
|
||||
/// \param UpdateFlags Update live intervals for nonallocatable physregs.
|
||||
///
|
||||
/// Requires MI and BundleStart to have SlotIndexes, and assumes
|
||||
/// existing liveness is accurate. BundleStart should be the first
|
||||
/// instruction in the Bundle.
|
||||
void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart,
|
||||
bool UpdateFlags = false);
|
||||
|
||||
/// repairIntervalsInRange - Update live intervals for instructions in a
|
||||
/// range of iterators. It is intended for use after target hooks that may
|
||||
/// insert or remove instructions, and is only efficient for a small number
|
||||
/// of instructions.
|
||||
///
|
||||
/// OrigRegs is a vector of registers that were originally used by the
|
||||
/// instructions in the range between the two iterators.
|
||||
///
|
||||
  /// Currently, the only changes that are supported are simple removal
|
||||
/// and addition of uses.
|
||||
void repairIntervalsInRange(MachineBasicBlock *MBB,
|
||||
MachineBasicBlock::iterator Begin,
|
||||
MachineBasicBlock::iterator End,
|
||||
ArrayRef<unsigned> OrigRegs);
|
||||
|
||||
// Register mask functions.
|
||||
//
|
||||
// Machine instructions may use a register mask operand to indicate that a
|
||||
// large number of registers are clobbered by the instruction. This is
|
||||
// typically used for calls.
|
||||
//
|
||||
// For compile time performance reasons, these clobbers are not recorded in
|
||||
// the live intervals for individual physical registers. Instead,
|
||||
// LiveIntervalAnalysis maintains a sorted list of instructions with
|
||||
// register mask operands.
|
||||
|
||||
/// getRegMaskSlots - Returns a sorted array of slot indices of all
|
||||
/// instructions with register mask operands.
|
||||
ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
|
||||
|
||||
/// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all
|
||||
/// instructions with register mask operands in the basic block numbered
|
||||
/// MBBNum.
|
||||
ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
|
||||
std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
|
||||
return getRegMaskSlots().slice(P.first, P.second);
|
||||
}
|
||||
|
||||
/// getRegMaskBits() - Returns an array of register mask pointers
|
||||
/// corresponding to getRegMaskSlots().
|
||||
ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
|
||||
|
||||
/// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding
|
||||
/// to getRegMaskSlotsInBlock(MBBNum).
|
||||
ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
|
||||
std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
|
||||
return getRegMaskBits().slice(P.first, P.second);
|
||||
}
|
||||
|
||||
/// checkRegMaskInterference - Test if LI is live across any register mask
|
||||
/// instructions, and compute a bit mask of physical registers that are not
|
||||
/// clobbered by any of them.
|
||||
///
|
||||
/// Returns false if LI doesn't cross any register mask instructions. In
|
||||
/// that case, the bit vector is not filled in.
|
||||
bool checkRegMaskInterference(LiveInterval &LI,
|
||||
BitVector &UsableRegs);
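A usage sketch for the interference check above (not part of the original header): decide whether a candidate physical register is clobbered by any register mask (e.g. a call) that LI crosses. LIS, LI and PhysReg are assumed to exist inside an allocator.

#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"

using namespace llvm;

// Sketch: true if some register mask crossed by LI clobbers PhysReg.
static bool clobberedByRegMask(LiveIntervals &LIS, LiveInterval &LI,
                               unsigned PhysReg) {
  BitVector UsableRegs;
  if (!LIS.checkRegMaskInterference(LI, UsableRegs))
    return false;                    // LI crosses no register mask instructions
  // UsableRegs has one bit per physical register; a clear bit means at least
  // one crossed mask clobbers that register.
  return !UsableRegs.test(PhysReg);
}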
|
||||
|
||||
// Register unit functions.
|
||||
//
|
||||
// Fixed interference occurs when MachineInstrs use physregs directly
|
||||
// instead of virtual registers. This typically happens when passing
|
||||
// arguments to a function call, or when instructions require operands in
|
||||
// fixed registers.
|
||||
//
|
||||
// Each physreg has one or more register units, see MCRegisterInfo. We
|
||||
// track liveness per register unit to handle aliasing registers more
|
||||
// efficiently.
|
||||
|
||||
/// getRegUnit - Return the live range for Unit.
|
||||
/// It will be computed if it doesn't exist.
|
||||
LiveInterval &getRegUnit(unsigned Unit) {
|
||||
LiveInterval *LI = RegUnitIntervals[Unit];
|
||||
if (!LI) {
|
||||
// Compute missing ranges on demand.
|
||||
RegUnitIntervals[Unit] = LI = new LiveInterval(Unit, HUGE_VALF);
|
||||
computeRegUnitInterval(LI);
|
||||
}
|
||||
return *LI;
|
||||
}
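A usage sketch for the register-unit ranges (not part of the original header): check a virtual register's interval against the fixed liveness of each unit of a candidate physical register. LIS, TRI and VirtRegLI are assumed to exist inside a pass; MCRegUnitIterator comes from MCRegisterInfo, the base class of TargetRegisterInfo.

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

// Sketch: true if assigning PhysReg would overlap fixed (physreg) liveness.
static bool conflictsWithFixedUses(LiveIntervals &LIS,
                                   const TargetRegisterInfo &TRI,
                                   const LiveInterval &VirtRegLI,
                                   unsigned PhysReg) {
  for (MCRegUnitIterator Units(PhysReg, &TRI); Units.isValid(); ++Units)
    if (VirtRegLI.overlaps(LIS.getRegUnit(*Units)))   // computed on demand
      return true;
  return false;
}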
|
||||
|
||||
/// getCachedRegUnit - Return the live range for Unit if it has already
|
||||
/// been computed, or NULL if it hasn't been computed yet.
|
||||
LiveInterval *getCachedRegUnit(unsigned Unit) {
|
||||
return RegUnitIntervals[Unit];
|
||||
}
|
||||
|
||||
const LiveInterval *getCachedRegUnit(unsigned Unit) const {
|
||||
return RegUnitIntervals[Unit];
|
||||
}
|
||||
|
||||
private:
|
||||
/// Compute live intervals for all virtual registers.
|
||||
void computeVirtRegs();
|
||||
|
||||
/// Compute RegMaskSlots and RegMaskBits.
|
||||
void computeRegMasks();
|
||||
|
||||
static LiveInterval* createInterval(unsigned Reg);
|
||||
|
||||
void printInstrs(raw_ostream &O) const;
|
||||
void dumpInstrs() const;
|
||||
|
||||
void computeLiveInRegUnits();
|
||||
void computeRegUnitInterval(LiveInterval*);
|
||||
void computeVirtRegInterval(LiveInterval*);
|
||||
|
||||
class HMEditor;
|
||||
};
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
205
thirdparty/clang/include/llvm/CodeGen/LiveIntervalUnion.h
vendored
Normal file
@@ -0,0 +1,205 @@
|
||||
//===-- LiveIntervalUnion.h - Live interval union data struct --*- C++ -*--===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// LiveIntervalUnion is a union of live segments across multiple live virtual
|
||||
// registers. This may be used during coalescing to represent a congruence
|
||||
// class, or during register allocation to model liveness of a physical
|
||||
// register.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVEINTERVALUNION_H
|
||||
#define LLVM_CODEGEN_LIVEINTERVALUNION_H
|
||||
|
||||
#include "llvm/ADT/IntervalMap.h"
|
||||
#include "llvm/CodeGen/LiveInterval.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class TargetRegisterInfo;
|
||||
|
||||
#ifndef NDEBUG
|
||||
// forward declaration
|
||||
template <unsigned Element> class SparseBitVector;
|
||||
typedef SparseBitVector<128> LiveVirtRegBitSet;
|
||||
#endif
|
||||
|
||||
/// Compare a live virtual register segment to a LiveIntervalUnion segment.
|
||||
inline bool
|
||||
overlap(const LiveRange &VRSeg,
|
||||
const IntervalMap<SlotIndex, LiveInterval*>::const_iterator &LUSeg) {
|
||||
return VRSeg.start < LUSeg.stop() && LUSeg.start() < VRSeg.end;
|
||||
}
|
||||
|
||||
/// Union of live intervals that are strong candidates for coalescing into a
|
||||
/// single register (either physical or virtual depending on the context). We
|
||||
/// expect the constituent live intervals to be disjoint, although we may
|
||||
/// eventually make exceptions to handle value-based interference.
|
||||
class LiveIntervalUnion {
|
||||
// A set of live virtual register segments that supports fast insertion,
|
||||
// intersection, and removal.
|
||||
// Mapping SlotIndex intervals to virtual register numbers.
|
||||
typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments;
|
||||
|
||||
public:
|
||||
// SegmentIter can advance to the next segment ordered by starting position
|
||||
// which may belong to a different live virtual register. We also must be able
|
||||
// to reach the current segment's containing virtual register.
|
||||
typedef LiveSegments::iterator SegmentIter;
|
||||
|
||||
// LiveIntervalUnions share an external allocator.
|
||||
typedef LiveSegments::Allocator Allocator;
|
||||
|
||||
class Query;
|
||||
|
||||
private:
|
||||
unsigned Tag; // unique tag for current contents.
|
||||
LiveSegments Segments; // union of virtual reg segments
|
||||
|
||||
public:
|
||||
explicit LiveIntervalUnion(Allocator &a) : Tag(0), Segments(a) {}
|
||||
|
||||
// Iterate over all segments in the union of live virtual registers ordered
|
||||
// by their starting position.
|
||||
SegmentIter begin() { return Segments.begin(); }
|
||||
SegmentIter end() { return Segments.end(); }
|
||||
SegmentIter find(SlotIndex x) { return Segments.find(x); }
|
||||
bool empty() const { return Segments.empty(); }
|
||||
SlotIndex startIndex() const { return Segments.start(); }
|
||||
|
||||
// Provide public access to the underlying map to allow overlap iteration.
|
||||
typedef LiveSegments Map;
|
||||
const Map &getMap() { return Segments; }
|
||||
|
||||
/// getTag - Return an opaque tag representing the current state of the union.
|
||||
unsigned getTag() const { return Tag; }
|
||||
|
||||
/// changedSince - Return true if the union change since getTag returned tag.
|
||||
bool changedSince(unsigned tag) const { return tag != Tag; }
|
||||
|
||||
// Add a live virtual register to this union and merge its segments.
|
||||
void unify(LiveInterval &VirtReg);
|
||||
|
||||
// Remove a live virtual register's segments from this union.
|
||||
void extract(LiveInterval &VirtReg);
|
||||
|
||||
// Remove all inserted virtual registers.
|
||||
void clear() { Segments.clear(); ++Tag; }
|
||||
|
||||
// Print union, using TRI to translate register names
|
||||
void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
|
||||
|
||||
#ifndef NDEBUG
|
||||
// Verify the live intervals in this union and add them to the visited set.
|
||||
void verify(LiveVirtRegBitSet& VisitedVRegs);
|
||||
#endif
|
||||
|
||||
/// Query interferences between a single live virtual register and a live
|
||||
/// interval union.
|
||||
class Query {
|
||||
LiveIntervalUnion *LiveUnion;
|
||||
LiveInterval *VirtReg;
|
||||
LiveInterval::iterator VirtRegI; // current position in VirtReg
|
||||
SegmentIter LiveUnionI; // current position in LiveUnion
|
||||
SmallVector<LiveInterval*,4> InterferingVRegs;
|
||||
bool CheckedFirstInterference;
|
||||
bool SeenAllInterferences;
|
||||
bool SeenUnspillableVReg;
|
||||
unsigned Tag, UserTag;
|
||||
|
||||
public:
|
||||
Query(): LiveUnion(), VirtReg(), Tag(0), UserTag(0) {}
|
||||
|
||||
Query(LiveInterval *VReg, LiveIntervalUnion *LIU):
|
||||
LiveUnion(LIU), VirtReg(VReg), CheckedFirstInterference(false),
|
||||
SeenAllInterferences(false), SeenUnspillableVReg(false)
|
||||
{}
|
||||
|
||||
void clear() {
|
||||
LiveUnion = NULL;
|
||||
VirtReg = NULL;
|
||||
InterferingVRegs.clear();
|
||||
CheckedFirstInterference = false;
|
||||
SeenAllInterferences = false;
|
||||
SeenUnspillableVReg = false;
|
||||
Tag = 0;
|
||||
UserTag = 0;
|
||||
}
|
||||
|
||||
void init(unsigned UTag, LiveInterval *VReg, LiveIntervalUnion *LIU) {
|
||||
assert(VReg && LIU && "Invalid arguments");
|
||||
if (UserTag == UTag && VirtReg == VReg &&
|
||||
LiveUnion == LIU && !LIU->changedSince(Tag)) {
|
||||
// Retain cached results, e.g. firstInterference.
|
||||
return;
|
||||
}
|
||||
clear();
|
||||
LiveUnion = LIU;
|
||||
VirtReg = VReg;
|
||||
Tag = LIU->getTag();
|
||||
UserTag = UTag;
|
||||
}
|
||||
|
||||
LiveInterval &virtReg() const {
|
||||
assert(VirtReg && "uninitialized");
|
||||
return *VirtReg;
|
||||
}
|
||||
|
||||
// Does this live virtual register interfere with the union?
|
||||
bool checkInterference() { return collectInterferingVRegs(1); }
|
||||
|
||||
// Count the virtual registers in this union that interfere with this
|
||||
// query's live virtual register, up to maxInterferingRegs.
|
||||
unsigned collectInterferingVRegs(unsigned MaxInterferingRegs = UINT_MAX);
|
||||
|
||||
// Was this virtual register visited during collectInterferingVRegs?
|
||||
bool isSeenInterference(LiveInterval *VReg) const;
|
||||
|
||||
// Did collectInterferingVRegs collect all interferences?
|
||||
bool seenAllInterferences() const { return SeenAllInterferences; }
|
||||
|
||||
// Did collectInterferingVRegs encounter an unspillable vreg?
|
||||
bool seenUnspillableVReg() const { return SeenUnspillableVReg; }
|
||||
|
||||
// Vector generated by collectInterferingVRegs.
|
||||
const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
|
||||
return InterferingVRegs;
|
||||
}
|
||||
|
||||
private:
|
||||
Query(const Query&) LLVM_DELETED_FUNCTION;
|
||||
void operator=(const Query&) LLVM_DELETED_FUNCTION;
|
||||
};
|
||||
|
||||
// Array of LiveIntervalUnions.
|
||||
class Array {
|
||||
unsigned Size;
|
||||
LiveIntervalUnion *LIUs;
|
||||
public:
|
||||
Array() : Size(0), LIUs(0) {}
|
||||
~Array() { clear(); }
|
||||
|
||||
// Initialize the array to have Size entries.
|
||||
// Reuse an existing allocation if the size matches.
|
||||
void init(LiveIntervalUnion::Allocator&, unsigned Size);
|
||||
|
||||
unsigned size() const { return Size; }
|
||||
|
||||
void clear();
|
||||
|
||||
LiveIntervalUnion& operator[](unsigned idx) {
|
||||
assert(idx < Size && "idx out of bounds");
|
||||
return LIUs[idx];
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif // !defined(LLVM_CODEGEN_LIVEINTERVALUNION_H)
|
||||
210
thirdparty/clang/include/llvm/CodeGen/LiveRangeEdit.h
vendored
Normal file
210
thirdparty/clang/include/llvm/CodeGen/LiveRangeEdit.h
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// The LiveRangeEdit class represents changes done to a virtual register when it
|
||||
// is spilled or split.
|
||||
//
|
||||
// The parent register is never changed. Instead, a number of new virtual
|
||||
// registers are created and added to the newRegs vector.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
|
||||
#define LLVM_CODEGEN_LIVERANGEEDIT_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/SmallPtrSet.h"
|
||||
#include "llvm/CodeGen/LiveInterval.h"
|
||||
#include "llvm/Target/TargetMachine.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class AliasAnalysis;
|
||||
class LiveIntervals;
|
||||
class MachineLoopInfo;
|
||||
class MachineRegisterInfo;
|
||||
class VirtRegMap;
|
||||
|
||||
class LiveRangeEdit {
|
||||
public:
|
||||
/// Callback methods for LiveRangeEdit owners.
|
||||
class Delegate {
|
||||
virtual void anchor();
|
||||
public:
|
||||
/// Called immediately before erasing a dead machine instruction.
|
||||
virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
|
||||
|
||||
/// Called when a virtual register is no longer used. Return false to defer
|
||||
/// its deletion from LiveIntervals.
|
||||
virtual bool LRE_CanEraseVirtReg(unsigned) { return true; }
|
||||
|
||||
/// Called before shrinking the live range of a virtual register.
|
||||
virtual void LRE_WillShrinkVirtReg(unsigned) {}
|
||||
|
||||
/// Called after cloning a virtual register.
|
||||
/// This is used for new registers representing connected components of Old.
|
||||
virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
|
||||
|
||||
virtual ~Delegate() {}
|
||||
};
|
||||
|
||||
private:
|
||||
LiveInterval *Parent;
|
||||
SmallVectorImpl<LiveInterval*> &NewRegs;
|
||||
MachineRegisterInfo &MRI;
|
||||
LiveIntervals &LIS;
|
||||
VirtRegMap *VRM;
|
||||
const TargetInstrInfo &TII;
|
||||
Delegate *const TheDelegate;
|
||||
|
||||
/// FirstNew - Index of the first register added to NewRegs.
|
||||
const unsigned FirstNew;
|
||||
|
||||
/// ScannedRemattable - true when remattable values have been identified.
|
||||
bool ScannedRemattable;
|
||||
|
||||
/// Remattable - Values defined by remattable instructions as identified by
|
||||
/// tii.isTriviallyReMaterializable().
|
||||
SmallPtrSet<const VNInfo*,4> Remattable;
|
||||
|
||||
/// Rematted - Values that were actually rematted, and so need to have their
|
||||
/// live range trimmed or entirely removed.
|
||||
SmallPtrSet<const VNInfo*,4> Rematted;
|
||||
|
||||
/// scanRemattable - Identify the Parent values that may rematerialize.
|
||||
void scanRemattable(AliasAnalysis *aa);
|
||||
|
||||
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
|
||||
/// OrigIdx are also available with the same value at UseIdx.
|
||||
bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
|
||||
SlotIndex UseIdx) const;
|
||||
|
||||
/// foldAsLoad - If LI has a single use and a single def that can be folded as
|
||||
/// a load, eliminate the register by folding the def into the use.
|
||||
bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
|
||||
|
||||
public:
|
||||
/// Create a LiveRangeEdit for breaking down parent into smaller pieces.
|
||||
/// @param parent The register being spilled or split.
|
||||
/// @param newRegs List to receive any new registers created. This needn't be
|
||||
/// empty initially, any existing registers are ignored.
|
||||
/// @param MF The MachineFunction the live range edit is taking place in.
|
||||
/// @param lis The collection of all live intervals in this function.
|
||||
/// @param vrm Map of virtual registers to physical registers for this
|
||||
/// function. If NULL, no virtual register map updates will
|
||||
/// be done. This could be the case if called before Regalloc.
|
||||
LiveRangeEdit(LiveInterval *parent,
|
||||
SmallVectorImpl<LiveInterval*> &newRegs,
|
||||
MachineFunction &MF,
|
||||
LiveIntervals &lis,
|
||||
VirtRegMap *vrm,
|
||||
Delegate *delegate = 0)
|
||||
: Parent(parent), NewRegs(newRegs),
|
||||
MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
|
||||
TII(*MF.getTarget().getInstrInfo()),
|
||||
TheDelegate(delegate),
|
||||
FirstNew(newRegs.size()),
|
||||
ScannedRemattable(false) {}
|
||||
|
||||
LiveInterval &getParent() const {
|
||||
assert(Parent && "No parent LiveInterval");
|
||||
return *Parent;
|
||||
}
|
||||
unsigned getReg() const { return getParent().reg; }
|
||||
|
||||
/// Iterator for accessing the new registers added by this edit.
|
||||
typedef SmallVectorImpl<LiveInterval*>::const_iterator iterator;
|
||||
iterator begin() const { return NewRegs.begin()+FirstNew; }
|
||||
iterator end() const { return NewRegs.end(); }
|
||||
unsigned size() const { return NewRegs.size()-FirstNew; }
|
||||
bool empty() const { return size() == 0; }
|
||||
LiveInterval *get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
|
||||
|
||||
ArrayRef<LiveInterval*> regs() const {
|
||||
return makeArrayRef(NewRegs).slice(FirstNew);
|
||||
}
|
||||
|
||||
/// createFrom - Create a new virtual register based on OldReg.
|
||||
LiveInterval &createFrom(unsigned OldReg);
|
||||
|
||||
/// create - Create a new register with the same class and original slot as
|
||||
/// parent.
|
||||
LiveInterval &create() {
|
||||
return createFrom(getReg());
|
||||
}
|
||||
|
||||
/// anyRematerializable - Return true if any parent values may be
|
||||
/// rematerializable.
|
||||
/// This function must be called before any rematerialization is attempted.
|
||||
bool anyRematerializable(AliasAnalysis*);
|
||||
|
||||
/// checkRematerializable - Manually add VNI to the list of rematerializable
|
||||
/// values if DefMI may be rematerializable.
|
||||
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
|
||||
AliasAnalysis*);
|
||||
|
||||
/// Remat - Information needed to rematerialize at a specific location.
|
||||
struct Remat {
|
||||
VNInfo *ParentVNI; // parent_'s value at the remat location.
|
||||
MachineInstr *OrigMI; // Instruction defining ParentVNI.
|
||||
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(0) {}
|
||||
};
|
||||
|
||||
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at
|
||||
/// UseIdx. It is assumed that parent_.getVNINfoAt(UseIdx) == ParentVNI.
|
||||
/// When cheapAsAMove is set, only cheap remats are allowed.
|
||||
bool canRematerializeAt(Remat &RM,
|
||||
SlotIndex UseIdx,
|
||||
bool cheapAsAMove);
|
||||
|
||||
/// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
|
||||
/// instruction into MBB before MI. The new instruction is mapped, but
|
||||
/// liveness is not updated.
|
||||
/// Return the SlotIndex of the new instruction.
|
||||
SlotIndex rematerializeAt(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
unsigned DestReg,
|
||||
const Remat &RM,
|
||||
const TargetRegisterInfo&,
|
||||
bool Late = false);
|
||||
|
||||
/// markRematerialized - explicitly mark a value as rematerialized after doing
|
||||
/// it manually.
|
||||
void markRematerialized(const VNInfo *ParentVNI) {
|
||||
Rematted.insert(ParentVNI);
|
||||
}
|
||||
|
||||
/// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
|
||||
bool didRematerialize(const VNInfo *ParentVNI) const {
|
||||
return Rematted.count(ParentVNI);
|
||||
}
|
||||
|
||||
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
|
||||
/// to erase it from LIS.
|
||||
void eraseVirtReg(unsigned Reg);
|
||||
|
||||
/// eliminateDeadDefs - Try to delete machine instructions that are now dead
|
||||
/// (allDefsAreDead returns true). This may cause live intervals to be trimmed
|
||||
/// and further dead efs to be eliminated.
|
||||
/// RegsBeingSpilled lists registers currently being spilled by the register
|
||||
/// allocator. These registers should not be split into new intervals
|
||||
/// as currently those new intervals are not guaranteed to spill.
|
||||
void eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
|
||||
ArrayRef<unsigned> RegsBeingSpilled
|
||||
= ArrayRef<unsigned>());
|
||||
|
||||
/// calculateRegClassAndHint - Recompute register class and hint for each new
|
||||
/// register.
|
||||
void calculateRegClassAndHint(MachineFunction&,
|
||||
const MachineLoopInfo&);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
148
thirdparty/clang/include/llvm/CodeGen/LiveRegMatrix.h
vendored
Normal file
148
thirdparty/clang/include/llvm/CodeGen/LiveRegMatrix.h
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
//===-- LiveRegMatrix.h - Track register interference ---------*- C++ -*---===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// The LiveRegMatrix analysis pass keeps track of virtual register interference
|
||||
// along two dimensions: Slot indexes and register units. The matrix is used by
|
||||
// register allocators to ensure that no interfering virtual registers get
|
||||
// assigned to overlapping physical registers.
|
||||
//
|
||||
// Register units are defined in MCRegisterInfo.h, they represent the smallest
|
||||
// unit of interference when dealing with overlapping physical registers. The
|
||||
// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
|
||||
// a virtual register is assigned to a physical register, the live range for
|
||||
// the virtual register is inserted into the LiveIntervalUnion for each regunit
|
||||
// in the physreg.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
|
||||
#define LLVM_CODEGEN_LIVEREGMATRIX_H
|
||||
|
||||
#include "llvm/ADT/BitVector.h"
|
||||
#include "llvm/ADT/OwningPtr.h"
|
||||
#include "llvm/CodeGen/LiveIntervalUnion.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class LiveInterval;
|
||||
class LiveIntervalAnalysis;
|
||||
class MachineRegisterInfo;
|
||||
class TargetRegisterInfo;
|
||||
class VirtRegMap;
|
||||
|
||||
class LiveRegMatrix : public MachineFunctionPass {
|
||||
const TargetRegisterInfo *TRI;
|
||||
MachineRegisterInfo *MRI;
|
||||
LiveIntervals *LIS;
|
||||
VirtRegMap *VRM;
|
||||
|
||||
// UserTag changes whenever virtual registers have been modified.
|
||||
unsigned UserTag;
|
||||
|
||||
// The matrix is represented as a LiveIntervalUnion per register unit.
|
||||
LiveIntervalUnion::Allocator LIUAlloc;
|
||||
LiveIntervalUnion::Array Matrix;
|
||||
|
||||
// Cached queries per register unit.
|
||||
OwningArrayPtr<LiveIntervalUnion::Query> Queries;
|
||||
|
||||
// Cached register mask interference info.
|
||||
unsigned RegMaskTag;
|
||||
unsigned RegMaskVirtReg;
|
||||
BitVector RegMaskUsable;
|
||||
|
||||
// MachineFunctionPass boilerplate.
|
||||
virtual void getAnalysisUsage(AnalysisUsage&) const;
|
||||
virtual bool runOnMachineFunction(MachineFunction&);
|
||||
virtual void releaseMemory();
|
||||
public:
|
||||
static char ID;
|
||||
LiveRegMatrix();
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// High-level interface.
|
||||
//===--------------------------------------------------------------------===//
|
||||
//
|
||||
// Check for interference before assigning virtual registers to physical
|
||||
// registers.
|
||||
//
|
||||
|
||||
/// Invalidate cached interference queries after modifying virtual register
|
||||
/// live ranges. Interference checks may return stale information unless
|
||||
/// caches are invalidated.
|
||||
void invalidateVirtRegs() { ++UserTag; }
|
||||
|
||||
enum InterferenceKind {
|
||||
/// No interference, go ahead and assign.
|
||||
IK_Free = 0,
|
||||
|
||||
/// Virtual register interference. There are interfering virtual registers
|
||||
/// assigned to PhysReg or its aliases. This interference could be resolved
|
||||
/// by unassigning those other virtual registers.
|
||||
IK_VirtReg,
|
||||
|
||||
/// Register unit interference. A fixed live range is in the way, typically
|
||||
/// argument registers for a call. This can't be resolved by unassigning
|
||||
/// other virtual registers.
|
||||
IK_RegUnit,
|
||||
|
||||
/// RegMask interference. The live range is crossing an instruction with a
|
||||
/// regmask operand that doesn't preserve PhysReg. This typically means
|
||||
/// VirtReg is live across a call, and PhysReg isn't call-preserved.
|
||||
IK_RegMask
|
||||
};
|
||||
|
||||
/// Check for interference before assigning VirtReg to PhysReg.
|
||||
/// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
|
||||
/// When there is more than one kind of interference, the InterferenceKind
|
||||
/// with the highest enum value is returned.
|
||||
InterferenceKind checkInterference(LiveInterval &VirtReg, unsigned PhysReg);
|
||||
|
||||
/// Assign VirtReg to PhysReg.
|
||||
/// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
|
||||
/// update VirtRegMap. The live range is expected to be available in PhysReg.
|
||||
void assign(LiveInterval &VirtReg, unsigned PhysReg);
|
||||
|
||||
/// Unassign VirtReg from its PhysReg.
|
||||
/// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
|
||||
/// the assignment and updates VirtRegMap accordingly.
|
||||
void unassign(LiveInterval &VirtReg);
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Low-level interface.
|
||||
//===--------------------------------------------------------------------===//
|
||||
//
|
||||
// Provide access to the underlying LiveIntervalUnions.
|
||||
//
|
||||
|
||||
/// Check for regmask interference only.
|
||||
/// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
|
||||
/// If PhysReg is null, check if VirtReg crosses any regmask operands.
|
||||
bool checkRegMaskInterference(LiveInterval &VirtReg, unsigned PhysReg = 0);
|
||||
|
||||
/// Check for regunit interference only.
|
||||
/// Return true if VirtReg overlaps a fixed assignment of one of PhysRegs's
|
||||
/// register units.
|
||||
bool checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg);
|
||||
|
||||
/// Query a line of the assigned virtual register matrix directly.
|
||||
/// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
|
||||
/// This returns a reference to an internal Query data structure that is only
|
||||
/// valid until the next query() call.
|
||||
LiveIntervalUnion::Query &query(LiveInterval &VirtReg, unsigned RegUnit);
|
||||
|
||||
/// Directly access the live interval unions per regunit.
|
||||
/// This returns an array indexed by the regunit number.
|
||||
LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
|
||||
};
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
|
||||
99
thirdparty/clang/include/llvm/CodeGen/LiveStackAnalysis.h
vendored
Normal file
99
thirdparty/clang/include/llvm/CodeGen/LiveStackAnalysis.h
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the live stack slot analysis pass. It is analogous to
|
||||
// live interval analysis except it's analyzing liveness of stack slots rather
|
||||
// than registers.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVESTACKANALYSIS_H
|
||||
#define LLVM_CODEGEN_LIVESTACKANALYSIS_H
|
||||
|
||||
#include "llvm/CodeGen/LiveInterval.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
#include <map>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class LiveStacks : public MachineFunctionPass {
|
||||
const TargetRegisterInfo *TRI;
|
||||
|
||||
/// Special pool allocator for VNInfo's (LiveInterval val#).
|
||||
///
|
||||
VNInfo::Allocator VNInfoAllocator;
|
||||
|
||||
/// S2IMap - Stack slot indices to live interval mapping.
|
||||
///
|
||||
typedef std::map<int, LiveInterval> SS2IntervalMap;
|
||||
SS2IntervalMap S2IMap;
|
||||
|
||||
/// S2RCMap - Stack slot indices to register class mapping.
|
||||
std::map<int, const TargetRegisterClass*> S2RCMap;
|
||||
|
||||
public:
|
||||
static char ID; // Pass identification, replacement for typeid
|
||||
LiveStacks() : MachineFunctionPass(ID) {
|
||||
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
|
||||
}
|
||||
|
||||
typedef SS2IntervalMap::iterator iterator;
|
||||
typedef SS2IntervalMap::const_iterator const_iterator;
|
||||
const_iterator begin() const { return S2IMap.begin(); }
|
||||
const_iterator end() const { return S2IMap.end(); }
|
||||
iterator begin() { return S2IMap.begin(); }
|
||||
iterator end() { return S2IMap.end(); }
|
||||
|
||||
unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
|
||||
|
||||
LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC);
|
||||
|
||||
LiveInterval &getInterval(int Slot) {
|
||||
assert(Slot >= 0 && "Spill slot indice must be >= 0");
|
||||
SS2IntervalMap::iterator I = S2IMap.find(Slot);
|
||||
assert(I != S2IMap.end() && "Interval does not exist for stack slot");
|
||||
return I->second;
|
||||
}
|
||||
|
||||
const LiveInterval &getInterval(int Slot) const {
|
||||
assert(Slot >= 0 && "Spill slot indice must be >= 0");
|
||||
SS2IntervalMap::const_iterator I = S2IMap.find(Slot);
|
||||
assert(I != S2IMap.end() && "Interval does not exist for stack slot");
|
||||
return I->second;
|
||||
}
|
||||
|
||||
bool hasInterval(int Slot) const {
|
||||
return S2IMap.count(Slot);
|
||||
}
|
||||
|
||||
const TargetRegisterClass *getIntervalRegClass(int Slot) const {
|
||||
assert(Slot >= 0 && "Spill slot indice must be >= 0");
|
||||
std::map<int, const TargetRegisterClass*>::const_iterator
|
||||
I = S2RCMap.find(Slot);
|
||||
assert(I != S2RCMap.end() &&
|
||||
"Register class info does not exist for stack slot");
|
||||
return I->second;
|
||||
}
|
||||
|
||||
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
virtual void releaseMemory();
|
||||
|
||||
/// runOnMachineFunction - pass entry point
|
||||
virtual bool runOnMachineFunction(MachineFunction&);
|
||||
|
||||
/// print - Implement the dump method.
|
||||
virtual void print(raw_ostream &O, const Module* = 0) const;
|
||||
};
|
||||
}
|
||||
|
||||
#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */
|
||||
307
thirdparty/clang/include/llvm/CodeGen/LiveVariables.h
vendored
Normal file
307
thirdparty/clang/include/llvm/CodeGen/LiveVariables.h
vendored
Normal file
@@ -0,0 +1,307 @@
|
||||
//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the LiveVariables analysis pass. For each machine
|
||||
// instruction in the function, this pass calculates the set of registers that
|
||||
// are immediately dead after the instruction (i.e., the instruction calculates
|
||||
// the value, but it is never used) and the set of registers that are used by
|
||||
// the instruction, but are never used after the instruction (i.e., they are
|
||||
// killed).
|
||||
//
|
||||
// This class computes live variables using a sparse implementation based on
|
||||
// the machine code SSA form. This class computes live variable information for
|
||||
// each virtual and _register allocatable_ physical register in a function. It
|
||||
// uses the dominance properties of SSA form to efficiently compute live
|
||||
// variables for virtual registers, and assumes that physical registers are only
|
||||
// live within a single basic block (allowing it to do a single local analysis
|
||||
// to resolve physical register lifetimes in each basic block). If a physical
|
||||
// register is not register allocatable, it is not tracked. This is useful for
|
||||
// things like the stack pointer and condition codes.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
|
||||
#define LLVM_CODEGEN_LIVEVARIABLES_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/IndexedMap.h"
|
||||
#include "llvm/ADT/SmallSet.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/ADT/SparseBitVector.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MachineBasicBlock;
|
||||
class MachineRegisterInfo;
|
||||
|
||||
class LiveVariables : public MachineFunctionPass {
|
||||
public:
|
||||
static char ID; // Pass identification, replacement for typeid
|
||||
LiveVariables() : MachineFunctionPass(ID) {
|
||||
initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
|
||||
}
|
||||
|
||||
/// VarInfo - This represents the regions where a virtual register is live in
|
||||
/// the program. We represent this with three different pieces of
|
||||
/// information: the set of blocks in which the instruction is live
|
||||
/// throughout, the set of blocks in which the instruction is actually used,
|
||||
/// and the set of non-phi instructions that are the last users of the value.
|
||||
///
|
||||
/// In the common case where a value is defined and killed in the same block,
|
||||
/// There is one killing instruction, and AliveBlocks is empty.
|
||||
///
|
||||
/// Otherwise, the value is live out of the block. If the value is live
|
||||
/// throughout any blocks, these blocks are listed in AliveBlocks. Blocks
|
||||
/// where the liveness range ends are not included in AliveBlocks, instead
|
||||
/// being captured by the Kills set. In these blocks, the value is live into
|
||||
/// the block (unless the value is defined and killed in the same block) and
|
||||
/// lives until the specified instruction. Note that there cannot ever be a
|
||||
/// value whose Kills set contains two instructions from the same basic block.
|
||||
///
|
||||
/// PHI nodes complicate things a bit. If a PHI node is the last user of a
|
||||
/// value in one of its predecessor blocks, it is not listed in the kills set,
|
||||
/// but does include the predecessor block in the AliveBlocks set (unless that
|
||||
/// block also defines the value). This leads to the (perfectly sensical)
|
||||
/// situation where a value is defined in a block, and the last use is a phi
|
||||
/// node in the successor. In this case, AliveBlocks is empty (the value is
|
||||
/// not live across any blocks) and Kills is empty (phi nodes are not
|
||||
/// included). This is sensical because the value must be live to the end of
|
||||
/// the block, but is not live in any successor blocks.
|
||||
struct VarInfo {
|
||||
/// AliveBlocks - Set of blocks in which this value is alive completely
|
||||
/// through. This is a bit set which uses the basic block number as an
|
||||
/// index.
|
||||
///
|
||||
SparseBitVector<> AliveBlocks;
|
||||
|
||||
/// Kills - List of MachineInstruction's which are the last use of this
|
||||
/// virtual register (kill it) in their basic block.
|
||||
///
|
||||
std::vector<MachineInstr*> Kills;
|
||||
|
||||
/// removeKill - Delete a kill corresponding to the specified
|
||||
/// machine instruction. Returns true if there was a kill
|
||||
/// corresponding to this instruction, false otherwise.
|
||||
bool removeKill(MachineInstr *MI) {
|
||||
std::vector<MachineInstr*>::iterator
|
||||
I = std::find(Kills.begin(), Kills.end(), MI);
|
||||
if (I == Kills.end())
|
||||
return false;
|
||||
Kills.erase(I);
|
||||
return true;
|
||||
}
|
||||
|
||||
/// findKill - Find a kill instruction in MBB. Return NULL if none is found.
|
||||
MachineInstr *findKill(const MachineBasicBlock *MBB) const;
|
||||
|
||||
/// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
|
||||
/// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
|
||||
/// MBB, it is not considered live in.
|
||||
bool isLiveIn(const MachineBasicBlock &MBB,
|
||||
unsigned Reg,
|
||||
MachineRegisterInfo &MRI);
|
||||
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
private:
|
||||
/// VirtRegInfo - This list is a mapping from virtual register number to
|
||||
/// variable information.
|
||||
///
|
||||
IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;
|
||||
|
||||
/// PHIJoins - list of virtual registers that are PHI joins. These registers
|
||||
/// may have multiple definitions, and they require special handling when
|
||||
/// building live intervals.
|
||||
SparseBitVector<> PHIJoins;
|
||||
|
||||
private: // Intermediate data structures
|
||||
MachineFunction *MF;
|
||||
|
||||
MachineRegisterInfo* MRI;
|
||||
|
||||
const TargetRegisterInfo *TRI;
|
||||
|
||||
// PhysRegInfo - Keep track of which instruction was the last def of a
|
||||
// physical register. This is a purely local property, because all physical
|
||||
// register references are presumed dead across basic blocks.
|
||||
MachineInstr **PhysRegDef;
|
||||
|
||||
// PhysRegInfo - Keep track of which instruction was the last use of a
|
||||
// physical register. This is a purely local property, because all physical
|
||||
// register references are presumed dead across basic blocks.
|
||||
MachineInstr **PhysRegUse;
|
||||
|
||||
SmallVector<unsigned, 4> *PHIVarInfo;
|
||||
|
||||
// DistanceMap - Keep track the distance of a MI from the start of the
|
||||
// current basic block.
|
||||
DenseMap<MachineInstr*, unsigned> DistanceMap;
|
||||
|
||||
/// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
|
||||
/// uses. Pay special attention to the sub-register uses which may come below
|
||||
/// the last use of the whole register.
|
||||
bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
|
||||
|
||||
/// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
|
||||
void HandleRegMask(const MachineOperand&);
|
||||
|
||||
void HandlePhysRegUse(unsigned Reg, MachineInstr *MI);
|
||||
void HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
|
||||
SmallVector<unsigned, 4> &Defs);
|
||||
void UpdatePhysRegDefs(MachineInstr *MI, SmallVector<unsigned, 4> &Defs);
|
||||
|
||||
/// FindLastRefOrPartRef - Return the last reference or partial reference of
|
||||
/// the specified register.
|
||||
MachineInstr *FindLastRefOrPartRef(unsigned Reg);
|
||||
|
||||
/// FindLastPartialDef - Return the last partial def of the specified
|
||||
/// register. Also returns the sub-registers that're defined by the
|
||||
/// instruction.
|
||||
MachineInstr *FindLastPartialDef(unsigned Reg,
|
||||
SmallSet<unsigned,4> &PartDefRegs);
|
||||
|
||||
/// analyzePHINodes - Gather information about the PHI nodes in here. In
|
||||
/// particular, we want to map the variable information of a virtual
|
||||
/// register which is used in a PHI node. We map that to the BB the vreg
|
||||
/// is coming from.
|
||||
void analyzePHINodes(const MachineFunction& Fn);
|
||||
public:
|
||||
|
||||
virtual bool runOnMachineFunction(MachineFunction &MF);
|
||||
|
||||
/// RegisterDefIsDead - Return true if the specified instruction defines the
|
||||
/// specified register, but that definition is dead.
|
||||
bool RegisterDefIsDead(MachineInstr *MI, unsigned Reg) const;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// API to update live variable information
|
||||
|
||||
/// replaceKillInstruction - Update register kill info by replacing a kill
|
||||
/// instruction with a new one.
|
||||
void replaceKillInstruction(unsigned Reg, MachineInstr *OldMI,
|
||||
MachineInstr *NewMI);
|
||||
|
||||
/// addVirtualRegisterKilled - Add information about the fact that the
|
||||
/// specified register is killed after being used by the specified
|
||||
/// instruction. If AddIfNotFound is true, add a implicit operand if it's
|
||||
/// not found.
|
||||
void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
|
||||
bool AddIfNotFound = false) {
|
||||
if (MI->addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
|
||||
getVarInfo(IncomingReg).Kills.push_back(MI);
|
||||
}
|
||||
|
||||
/// removeVirtualRegisterKilled - Remove the specified kill of the virtual
|
||||
/// register from the live variable information. Returns true if the
|
||||
/// variable was marked as killed by the specified instruction,
|
||||
/// false otherwise.
|
||||
bool removeVirtualRegisterKilled(unsigned reg, MachineInstr *MI) {
|
||||
if (!getVarInfo(reg).removeKill(MI))
|
||||
return false;
|
||||
|
||||
bool Removed = false;
|
||||
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
||||
MachineOperand &MO = MI->getOperand(i);
|
||||
if (MO.isReg() && MO.isKill() && MO.getReg() == reg) {
|
||||
MO.setIsKill(false);
|
||||
Removed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(Removed && "Register is not used by this instruction!");
|
||||
(void)Removed;
|
||||
return true;
|
||||
}
|
||||
|
||||
/// removeVirtualRegistersKilled - Remove all killed info for the specified
|
||||
/// instruction.
|
||||
void removeVirtualRegistersKilled(MachineInstr *MI);
|
||||
|
||||
/// addVirtualRegisterDead - Add information about the fact that the specified
|
||||
/// register is dead after being used by the specified instruction. If
|
||||
/// AddIfNotFound is true, add a implicit operand if it's not found.
|
||||
void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr *MI,
|
||||
bool AddIfNotFound = false) {
|
||||
if (MI->addRegisterDead(IncomingReg, TRI, AddIfNotFound))
|
||||
getVarInfo(IncomingReg).Kills.push_back(MI);
|
||||
}
|
||||
|
||||
/// removeVirtualRegisterDead - Remove the specified kill of the virtual
|
||||
/// register from the live variable information. Returns true if the
|
||||
/// variable was marked dead at the specified instruction, false
|
||||
/// otherwise.
|
||||
bool removeVirtualRegisterDead(unsigned reg, MachineInstr *MI) {
|
||||
if (!getVarInfo(reg).removeKill(MI))
|
||||
return false;
|
||||
|
||||
bool Removed = false;
|
||||
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
||||
MachineOperand &MO = MI->getOperand(i);
|
||||
if (MO.isReg() && MO.isDef() && MO.getReg() == reg) {
|
||||
MO.setIsDead(false);
|
||||
Removed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(Removed && "Register is not defined by this instruction!");
|
||||
(void)Removed;
|
||||
return true;
|
||||
}
|
||||
|
||||
void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
|
||||
virtual void releaseMemory() {
|
||||
VirtRegInfo.clear();
|
||||
}
|
||||
|
||||
/// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
|
||||
/// register.
|
||||
VarInfo &getVarInfo(unsigned RegIdx);
|
||||
|
||||
void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
|
||||
MachineBasicBlock *BB);
|
||||
void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
|
||||
MachineBasicBlock *BB,
|
||||
std::vector<MachineBasicBlock*> &WorkList);
|
||||
void HandleVirtRegDef(unsigned reg, MachineInstr *MI);
|
||||
void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB,
|
||||
MachineInstr *MI);
|
||||
|
||||
bool isLiveIn(unsigned Reg, const MachineBasicBlock &MBB) {
|
||||
return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
|
||||
}
|
||||
|
||||
/// isLiveOut - Determine if Reg is live out from MBB, when not considering
|
||||
/// PHI nodes. This means that Reg is either killed by a successor block or
|
||||
/// passed through one.
|
||||
bool isLiveOut(unsigned Reg, const MachineBasicBlock &MBB);
|
||||
|
||||
/// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
|
||||
/// variables that are live out of DomBB and live into SuccBB will be marked
|
||||
/// as passing live through BB. This method assumes that the machine code is
|
||||
/// still in SSA form.
|
||||
void addNewBlock(MachineBasicBlock *BB,
|
||||
MachineBasicBlock *DomBB,
|
||||
MachineBasicBlock *SuccBB);
|
||||
|
||||
/// isPHIJoin - Return true if Reg is a phi join register.
|
||||
bool isPHIJoin(unsigned Reg) { return PHIJoins.test(Reg); }
|
||||
|
||||
/// setPHIJoin - Mark Reg as a phi join register.
|
||||
void setPHIJoin(unsigned Reg) { PHIJoins.set(Reg); }
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
56
thirdparty/clang/include/llvm/CodeGen/MachORelocation.h
vendored
Normal file
56
thirdparty/clang/include/llvm/CodeGen/MachORelocation.h
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the MachORelocation class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHORELOCATION_H
|
||||
#define LLVM_CODEGEN_MACHORELOCATION_H
|
||||
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// MachORelocation - This struct contains information about each relocation
|
||||
/// that needs to be emitted to the file.
|
||||
/// see <mach-o/reloc.h>
|
||||
class MachORelocation {
|
||||
uint32_t r_address; // offset in the section to what is being relocated
|
||||
uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
|
||||
bool r_pcrel; // was relocated pc-relative already
|
||||
uint8_t r_length; // length = 2 ^ r_length
|
||||
bool r_extern; //
|
||||
uint8_t r_type; // if not 0, machine-specific relocation type.
|
||||
bool r_scattered; // 1 = scattered, 0 = non-scattered
|
||||
int32_t r_value; // the value the item to be relocated is referring
|
||||
// to.
|
||||
public:
|
||||
uint32_t getPackedFields() const {
|
||||
if (r_scattered)
|
||||
return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
|
||||
((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
|
||||
else
|
||||
return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
|
||||
(r_extern << 4) | (r_type & 15);
|
||||
}
|
||||
uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
|
||||
uint32_t getRawAddress() const { return r_address; }
|
||||
|
||||
MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
|
||||
bool ext, uint8_t type, bool scattered = false,
|
||||
int32_t value = 0) :
|
||||
r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
|
||||
r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
|
||||
};
|
||||
|
||||
} // end llvm namespace
|
||||
|
||||
#endif // LLVM_CODEGEN_MACHORELOCATION_H
|
||||
730
thirdparty/clang/include/llvm/CodeGen/MachineBasicBlock.h
vendored
Normal file
730
thirdparty/clang/include/llvm/CodeGen/MachineBasicBlock.h
vendored
Normal file
@@ -0,0 +1,730 @@
|
||||
//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Collect the sequence of machine instructions for a basic block.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
|
||||
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
|
||||
|
||||
#include "llvm/ADT/GraphTraits.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include <functional>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class Pass;
|
||||
class BasicBlock;
|
||||
class MachineFunction;
|
||||
class MCSymbol;
|
||||
class SlotIndexes;
|
||||
class StringRef;
|
||||
class raw_ostream;
|
||||
class MachineBranchProbabilityInfo;
|
||||
|
||||
template <>
|
||||
struct ilist_traits<MachineInstr> : public ilist_default_traits<MachineInstr> {
|
||||
private:
|
||||
mutable ilist_half_node<MachineInstr> Sentinel;
|
||||
|
||||
// this is only set by the MachineBasicBlock owning the LiveList
|
||||
friend class MachineBasicBlock;
|
||||
MachineBasicBlock* Parent;
|
||||
|
||||
public:
|
||||
MachineInstr *createSentinel() const {
|
||||
return static_cast<MachineInstr*>(&Sentinel);
|
||||
}
|
||||
void destroySentinel(MachineInstr *) const {}
|
||||
|
||||
MachineInstr *provideInitialHead() const { return createSentinel(); }
|
||||
MachineInstr *ensureHead(MachineInstr*) const { return createSentinel(); }
|
||||
static void noteHead(MachineInstr*, MachineInstr*) {}
|
||||
|
||||
void addNodeToList(MachineInstr* N);
|
||||
void removeNodeFromList(MachineInstr* N);
|
||||
void transferNodesFromList(ilist_traits &SrcTraits,
|
||||
ilist_iterator<MachineInstr> first,
|
||||
ilist_iterator<MachineInstr> last);
|
||||
void deleteNode(MachineInstr *N);
|
||||
private:
|
||||
void createNode(const MachineInstr &);
|
||||
};
|
||||
|
||||
class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
|
||||
typedef ilist<MachineInstr> Instructions;
|
||||
Instructions Insts;
|
||||
const BasicBlock *BB;
|
||||
int Number;
|
||||
MachineFunction *xParent;
|
||||
|
||||
/// Predecessors/Successors - Keep track of the predecessor / successor
|
||||
/// basicblocks.
|
||||
std::vector<MachineBasicBlock *> Predecessors;
|
||||
std::vector<MachineBasicBlock *> Successors;
|
||||
|
||||
|
||||
/// Weights - Keep track of the weights to the successors. This vector
|
||||
/// has the same order as Successors, or it is empty if we don't use it
|
||||
/// (disable optimization).
|
||||
std::vector<uint32_t> Weights;
|
||||
typedef std::vector<uint32_t>::iterator weight_iterator;
|
||||
typedef std::vector<uint32_t>::const_iterator const_weight_iterator;
|
||||
|
||||
/// LiveIns - Keep track of the physical registers that are livein of
|
||||
/// the basicblock.
|
||||
std::vector<unsigned> LiveIns;
|
||||
|
||||
/// Alignment - Alignment of the basic block. Zero if the basic block does
|
||||
/// not need to be aligned.
|
||||
/// The alignment is specified as log2(bytes).
|
||||
unsigned Alignment;
|
||||
|
||||
/// IsLandingPad - Indicate that this basic block is entered via an
|
||||
/// exception handler.
|
||||
bool IsLandingPad;
|
||||
|
||||
/// AddressTaken - Indicate that this basic block is potentially the
|
||||
/// target of an indirect branch.
|
||||
bool AddressTaken;
|
||||
|
||||
// Intrusive list support
|
||||
MachineBasicBlock() {}
|
||||
|
||||
explicit MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb);
|
||||
|
||||
~MachineBasicBlock();
|
||||
|
||||
// MachineBasicBlocks are allocated and owned by MachineFunction.
|
||||
friend class MachineFunction;
|
||||
|
||||
public:
|
||||
/// getBasicBlock - Return the LLVM basic block that this instance
|
||||
/// corresponded to originally. Note that this may be NULL if this instance
|
||||
/// does not correspond directly to an LLVM basic block.
|
||||
///
|
||||
const BasicBlock *getBasicBlock() const { return BB; }
|
||||
|
||||
/// getName - Return the name of the corresponding LLVM basic block, or
|
||||
/// "(null)".
|
||||
StringRef getName() const;
|
||||
|
||||
/// getFullName - Return a formatted string to identify this block and its
|
||||
/// parent function.
|
||||
std::string getFullName() const;
|
||||
|
||||
/// hasAddressTaken - Test whether this block is potentially the target
|
||||
/// of an indirect branch.
|
||||
bool hasAddressTaken() const { return AddressTaken; }
|
||||
|
||||
/// setHasAddressTaken - Set this block to reflect that it potentially
|
||||
/// is the target of an indirect branch.
|
||||
void setHasAddressTaken() { AddressTaken = true; }
|
||||
|
||||
/// getParent - Return the MachineFunction containing this basic block.
|
||||
///
|
||||
const MachineFunction *getParent() const { return xParent; }
|
||||
MachineFunction *getParent() { return xParent; }
|
||||
|
||||
|
||||
/// bundle_iterator - MachineBasicBlock iterator that automatically skips over
|
||||
/// MIs that are inside bundles (i.e. walk top level MIs only).
|
||||
template<typename Ty, typename IterTy>
|
||||
class bundle_iterator
|
||||
: public std::iterator<std::bidirectional_iterator_tag, Ty, ptrdiff_t> {
|
||||
IterTy MII;
|
||||
|
||||
public:
|
||||
bundle_iterator(IterTy mii) : MII(mii) {}
|
||||
|
||||
bundle_iterator(Ty &mi) : MII(mi) {
|
||||
assert(!mi.isBundledWithPred() &&
|
||||
"It's not legal to initialize bundle_iterator with a bundled MI");
|
||||
}
|
||||
bundle_iterator(Ty *mi) : MII(mi) {
|
||||
assert((!mi || !mi->isBundledWithPred()) &&
|
||||
"It's not legal to initialize bundle_iterator with a bundled MI");
|
||||
}
|
||||
// Template allows conversion from const to nonconst.
|
||||
template<class OtherTy, class OtherIterTy>
|
||||
bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
|
||||
: MII(I.getInstrIterator()) {}
|
||||
bundle_iterator() : MII(0) {}
|
||||
|
||||
Ty &operator*() const { return *MII; }
|
||||
Ty *operator->() const { return &operator*(); }
|
||||
|
||||
operator Ty*() const { return MII; }
|
||||
|
||||
bool operator==(const bundle_iterator &x) const {
|
||||
return MII == x.MII;
|
||||
}
|
||||
bool operator!=(const bundle_iterator &x) const {
|
||||
return !operator==(x);
|
||||
}
|
||||
|
||||
// Increment and decrement operators...
|
||||
bundle_iterator &operator--() { // predecrement - Back up
|
||||
do --MII;
|
||||
while (MII->isBundledWithPred());
|
||||
return *this;
|
||||
}
|
||||
bundle_iterator &operator++() { // preincrement - Advance
|
||||
while (MII->isBundledWithSucc())
|
||||
++MII;
|
||||
++MII;
|
||||
return *this;
|
||||
}
|
||||
bundle_iterator operator--(int) { // postdecrement operators...
|
||||
bundle_iterator tmp = *this;
|
||||
--*this;
|
||||
return tmp;
|
||||
}
|
||||
bundle_iterator operator++(int) { // postincrement operators...
|
||||
bundle_iterator tmp = *this;
|
||||
++*this;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
IterTy getInstrIterator() const {
|
||||
return MII;
|
||||
}
|
||||
};
|
||||
|
||||
typedef Instructions::iterator instr_iterator;
|
||||
typedef Instructions::const_iterator const_instr_iterator;
|
||||
typedef std::reverse_iterator<instr_iterator> reverse_instr_iterator;
|
||||
typedef
|
||||
std::reverse_iterator<const_instr_iterator> const_reverse_instr_iterator;
|
||||
|
||||
typedef
|
||||
bundle_iterator<MachineInstr,instr_iterator> iterator;
|
||||
typedef
|
||||
bundle_iterator<const MachineInstr,const_instr_iterator> const_iterator;
|
||||
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
|
||||
typedef std::reverse_iterator<iterator> reverse_iterator;
|
||||
|
||||
|
||||
unsigned size() const { return (unsigned)Insts.size(); }
|
||||
bool empty() const { return Insts.empty(); }
|
||||
|
||||
MachineInstr& front() { return Insts.front(); }
|
||||
MachineInstr& back() { return Insts.back(); }
|
||||
const MachineInstr& front() const { return Insts.front(); }
|
||||
const MachineInstr& back() const { return Insts.back(); }
|
||||
|
||||
instr_iterator instr_begin() { return Insts.begin(); }
|
||||
const_instr_iterator instr_begin() const { return Insts.begin(); }
|
||||
instr_iterator instr_end() { return Insts.end(); }
|
||||
const_instr_iterator instr_end() const { return Insts.end(); }
|
||||
reverse_instr_iterator instr_rbegin() { return Insts.rbegin(); }
|
||||
const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
|
||||
reverse_instr_iterator instr_rend () { return Insts.rend(); }
|
||||
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
|
||||
|
||||
iterator begin() { return instr_begin(); }
|
||||
const_iterator begin() const { return instr_begin(); }
|
||||
iterator end () { return instr_end(); }
|
||||
const_iterator end () const { return instr_end(); }
|
||||
reverse_iterator rbegin() { return instr_rbegin(); }
|
||||
const_reverse_iterator rbegin() const { return instr_rbegin(); }
|
||||
reverse_iterator rend () { return instr_rend(); }
|
||||
const_reverse_iterator rend () const { return instr_rend(); }
|
||||
|
||||
|
||||
// Machine-CFG iterators
|
||||
typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::iterator succ_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::reverse_iterator
|
||||
pred_reverse_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
|
||||
const_pred_reverse_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::reverse_iterator
|
||||
succ_reverse_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
|
||||
const_succ_reverse_iterator;
|
||||
|
||||
pred_iterator pred_begin() { return Predecessors.begin(); }
|
||||
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
|
||||
pred_iterator pred_end() { return Predecessors.end(); }
|
||||
const_pred_iterator pred_end() const { return Predecessors.end(); }
|
||||
pred_reverse_iterator pred_rbegin()
|
||||
{ return Predecessors.rbegin();}
|
||||
const_pred_reverse_iterator pred_rbegin() const
|
||||
{ return Predecessors.rbegin();}
|
||||
pred_reverse_iterator pred_rend()
|
||||
{ return Predecessors.rend(); }
|
||||
const_pred_reverse_iterator pred_rend() const
|
||||
{ return Predecessors.rend(); }
|
||||
unsigned pred_size() const {
|
||||
return (unsigned)Predecessors.size();
|
||||
}
|
||||
bool pred_empty() const { return Predecessors.empty(); }
|
||||
succ_iterator succ_begin() { return Successors.begin(); }
|
||||
const_succ_iterator succ_begin() const { return Successors.begin(); }
|
||||
succ_iterator succ_end() { return Successors.end(); }
|
||||
const_succ_iterator succ_end() const { return Successors.end(); }
|
||||
succ_reverse_iterator succ_rbegin()
|
||||
{ return Successors.rbegin(); }
|
||||
const_succ_reverse_iterator succ_rbegin() const
|
||||
{ return Successors.rbegin(); }
|
||||
succ_reverse_iterator succ_rend()
|
||||
{ return Successors.rend(); }
|
||||
const_succ_reverse_iterator succ_rend() const
|
||||
{ return Successors.rend(); }
|
||||
unsigned succ_size() const {
|
||||
return (unsigned)Successors.size();
|
||||
}
|
||||
bool succ_empty() const { return Successors.empty(); }
|
||||
|
||||
// LiveIn management methods.
|
||||
|
||||
/// addLiveIn - Add the specified register as a live in. Note that it
|
||||
/// is an error to add the same register to the same set more than once.
|
||||
void addLiveIn(unsigned Reg) { LiveIns.push_back(Reg); }
|
||||
|
||||
/// removeLiveIn - Remove the specified register from the live in set.
|
||||
///
|
||||
void removeLiveIn(unsigned Reg);
|
||||
|
||||
/// isLiveIn - Return true if the specified register is in the live in set.
|
||||
///
|
||||
bool isLiveIn(unsigned Reg) const;
|
||||
|
||||
// Iteration support for live in sets. These sets are kept in sorted
|
||||
// order by their register number.
|
||||
typedef std::vector<unsigned>::const_iterator livein_iterator;
|
||||
livein_iterator livein_begin() const { return LiveIns.begin(); }
|
||||
livein_iterator livein_end() const { return LiveIns.end(); }
|
||||
bool livein_empty() const { return LiveIns.empty(); }
|
||||
|
||||
/// getAlignment - Return alignment of the basic block.
|
||||
/// The alignment is specified as log2(bytes).
|
||||
///
|
||||
unsigned getAlignment() const { return Alignment; }
|
||||
|
||||
/// setAlignment - Set alignment of the basic block.
|
||||
/// The alignment is specified as log2(bytes).
|
||||
///
|
||||
void setAlignment(unsigned Align) { Alignment = Align; }
|
||||
|
||||
/// isLandingPad - Returns true if the block is a landing pad. That is
|
||||
/// this basic block is entered via an exception handler.
|
||||
bool isLandingPad() const { return IsLandingPad; }
|
||||
|
||||
/// setIsLandingPad - Indicates the block is a landing pad. That is
|
||||
/// this basic block is entered via an exception handler.
|
||||
void setIsLandingPad(bool V = true) { IsLandingPad = V; }
|
||||
|
||||
/// getLandingPadSuccessor - If this block has a successor that is a landing
|
||||
/// pad, return it. Otherwise return NULL.
|
||||
const MachineBasicBlock *getLandingPadSuccessor() const;
|
||||
|
||||
// Code Layout methods.
|
||||
|
||||
/// moveBefore/moveAfter - move 'this' block before or after the specified
|
||||
/// block. This only moves the block, it does not modify the CFG or adjust
|
||||
/// potential fall-throughs at the end of the block.
|
||||
void moveBefore(MachineBasicBlock *NewAfter);
|
||||
void moveAfter(MachineBasicBlock *NewBefore);
|
||||
|
||||
/// updateTerminator - Update the terminator instructions in block to account
|
||||
/// for changes to the layout. If the block previously used a fallthrough,
|
||||
/// it may now need a branch, and if it previously used branching it may now
|
||||
/// be able to use a fallthrough.
|
||||
void updateTerminator();
|
||||
|
||||
// Machine-CFG mutators
|
||||
|
||||
/// addSuccessor - Add succ as a successor of this MachineBasicBlock.
|
||||
/// The Predecessors list of succ is automatically updated. WEIGHT
|
||||
/// parameter is stored in Weights list and it may be used by
|
||||
/// MachineBranchProbabilityInfo analysis to calculate branch probability.
|
||||
///
|
||||
/// Note that duplicate Machine CFG edges are not allowed.
|
||||
///
|
||||
void addSuccessor(MachineBasicBlock *succ, uint32_t weight = 0);
|
||||
|
||||
/// removeSuccessor - Remove successor from the successors list of this
|
||||
/// MachineBasicBlock. The Predecessors list of succ is automatically updated.
|
||||
///
|
||||
void removeSuccessor(MachineBasicBlock *succ);
|
||||
|
||||
/// removeSuccessor - Remove specified successor from the successors list of
|
||||
/// this MachineBasicBlock. The Predecessors list of succ is automatically
|
||||
/// updated. Return the iterator to the element after the one removed.
|
||||
///
|
||||
succ_iterator removeSuccessor(succ_iterator I);
|
||||
|
||||
/// replaceSuccessor - Replace successor OLD with NEW and update weight info.
|
||||
///
|
||||
void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);
|
||||
|
||||
|
||||
/// transferSuccessors - Transfers all the successors from MBB to this
|
||||
/// machine basic block (i.e., copies all the successors from fromMBB and
/// removes all the successors from fromMBB).
|
||||
void transferSuccessors(MachineBasicBlock *fromMBB);
|
||||
|
||||
/// transferSuccessorsAndUpdatePHIs - Transfers all the successors, as
|
||||
/// in transferSuccessors, and update PHI operands in the successor blocks
|
||||
/// which refer to fromMBB to refer to this.
|
||||
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
|
||||
|
||||
/// isPredecessor - Return true if the specified MBB is a predecessor of this
|
||||
/// block.
|
||||
bool isPredecessor(const MachineBasicBlock *MBB) const;
|
||||
|
||||
/// isSuccessor - Return true if the specified MBB is a successor of this
|
||||
/// block.
|
||||
bool isSuccessor(const MachineBasicBlock *MBB) const;
|
||||
|
||||
/// isLayoutSuccessor - Return true if the specified MBB will be emitted
|
||||
/// immediately after this block, such that if this block exits by
|
||||
/// falling through, control will transfer to the specified MBB. Note
|
||||
/// that MBB need not be a successor at all, for example if this block
|
||||
/// ends with an unconditional branch to some other block.
|
||||
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
|
||||
|
||||
/// canFallThrough - Return true if the block can implicitly transfer
|
||||
/// control to the block after it by falling off the end of it. This should
|
||||
/// return false if it can reach the block after it, but it uses an explicit
|
||||
/// branch to do so (e.g., a table jump). True is a conservative answer.
|
||||
bool canFallThrough();
|
||||
|
||||
/// Returns a pointer to the first instruction in this block that is not a
/// PHINode instruction. When adding instructions to the beginning of the
/// basic block, they should be added before the returned value, not before
/// the first instruction, which might be a PHI.
/// Returns end() if there's no non-PHI instruction.
|
||||
iterator getFirstNonPHI();
|
||||
|
||||
/// SkipPHIsAndLabels - Return the first instruction in MBB after I that is
|
||||
/// not a PHI or a label. This is the correct point to insert copies at the
|
||||
/// beginning of a basic block.
|
||||
iterator SkipPHIsAndLabels(iterator I);
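// Illustrative sketch (not part of the original header): inserting a copy at
// the canonical point after PHIs and labels, assuming TII (TargetInstrInfo*),
// a DebugLoc DL and the two registers are already in scope:
//
//   MachineBasicBlock::iterator IP = MBB->SkipPHIsAndLabels(MBB->begin());
//   BuildMI(*MBB, IP, DL, TII->get(TargetOpcode::COPY), DstReg)
//     .addReg(SrcReg);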
|
||||
|
||||
/// getFirstTerminator - returns an iterator to the first terminator
|
||||
/// instruction of this basic block. If a terminator does not exist,
|
||||
/// it returns end()
|
||||
iterator getFirstTerminator();
|
||||
const_iterator getFirstTerminator() const;
|
||||
|
||||
/// getFirstInstrTerminator - Same getFirstTerminator but it ignores bundles
|
||||
/// and return an instr_iterator instead.
|
||||
instr_iterator getFirstInstrTerminator();
|
||||
|
||||
/// getLastNonDebugInstr - returns an iterator to the last non-debug
|
||||
/// instruction in the basic block, or end()
|
||||
iterator getLastNonDebugInstr();
|
||||
const_iterator getLastNonDebugInstr() const;
|
||||
|
||||
/// SplitCriticalEdge - Split the critical edge from this block to the
|
||||
/// given successor block, and return the newly created block, or null
|
||||
/// if splitting is not possible.
|
||||
///
|
||||
/// This function updates LiveVariables, MachineDominatorTree, and
|
||||
/// MachineLoopInfo, as applicable.
|
||||
MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
|
||||
|
||||
void pop_front() { Insts.pop_front(); }
|
||||
void pop_back() { Insts.pop_back(); }
|
||||
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
|
||||
|
||||
/// Insert MI into the instruction list before I, possibly inside a bundle.
|
||||
///
|
||||
/// If the insertion point is inside a bundle, MI will be added to the bundle,
|
||||
/// otherwise MI will not be added to any bundle. That means this function
|
||||
/// alone can't be used to prepend or append instructions to bundles. See
|
||||
/// MIBundleBuilder::insert() for a more reliable way of doing that.
|
||||
instr_iterator insert(instr_iterator I, MachineInstr *M);
|
||||
|
||||
/// Insert a range of instructions into the instruction list before I.
|
||||
template<typename IT>
|
||||
void insert(iterator I, IT S, IT E) {
|
||||
Insts.insert(I.getInstrIterator(), S, E);
|
||||
}
|
||||
|
||||
/// Insert MI into the instruction list before I.
|
||||
iterator insert(iterator I, MachineInstr *MI) {
|
||||
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
|
||||
"Cannot insert instruction with bundle flags");
|
||||
return Insts.insert(I.getInstrIterator(), MI);
|
||||
}
|
||||
|
||||
/// Insert MI into the instruction list after I.
|
||||
iterator insertAfter(iterator I, MachineInstr *MI) {
|
||||
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
|
||||
"Cannot insert instruction with bundle flags");
|
||||
return Insts.insertAfter(I.getInstrIterator(), MI);
|
||||
}
|
||||
|
||||
/// Remove an instruction from the instruction list and delete it.
|
||||
///
|
||||
/// If the instruction is part of a bundle, the other instructions in the
|
||||
/// bundle will still be bundled after removing the single instruction.
|
||||
instr_iterator erase(instr_iterator I);
|
||||
|
||||
/// Remove an instruction from the instruction list and delete it.
|
||||
///
|
||||
/// If the instruction is part of a bundle, the other instructions in the
|
||||
/// bundle will still be bundled after removing the single instruction.
|
||||
instr_iterator erase_instr(MachineInstr *I) {
|
||||
return erase(instr_iterator(I));
|
||||
}
|
||||
|
||||
/// Remove a range of instructions from the instruction list and delete them.
|
||||
iterator erase(iterator I, iterator E) {
|
||||
return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
|
||||
}
|
||||
|
||||
/// Remove an instruction or bundle from the instruction list and delete it.
|
||||
///
|
||||
/// If I points to a bundle of instructions, they are all erased.
|
||||
iterator erase(iterator I) {
|
||||
return erase(I, llvm::next(I));
|
||||
}
|
||||
|
||||
/// Remove an instruction from the instruction list and delete it.
|
||||
///
|
||||
/// If I is the head of a bundle of instructions, the whole bundle will be
|
||||
/// erased.
|
||||
iterator erase(MachineInstr *I) {
|
||||
return erase(iterator(I));
|
||||
}
|
||||
|
||||
/// Remove the unbundled instruction from the instruction list without
|
||||
/// deleting it.
|
||||
///
|
||||
/// This function can not be used to remove bundled instructions, use
|
||||
/// remove_instr to remove individual instructions from a bundle.
|
||||
MachineInstr *remove(MachineInstr *I) {
|
||||
assert(!I->isBundled() && "Cannot remove bundled instructions");
|
||||
return Insts.remove(I);
|
||||
}
|
||||
|
||||
/// Remove the possibly bundled instruction from the instruction list
|
||||
/// without deleting it.
|
||||
///
|
||||
/// If the instruction is part of a bundle, the other instructions in the
|
||||
/// bundle will still be bundled after removing the single instruction.
|
||||
MachineInstr *remove_instr(MachineInstr *I);
|
||||
|
||||
void clear() {
|
||||
Insts.clear();
|
||||
}
|
||||
|
||||
/// Take an instruction from MBB 'Other' at the position From, and insert it
|
||||
/// into this MBB right before 'Where'.
|
||||
///
|
||||
/// If From points to a bundle of instructions, the whole bundle is moved.
|
||||
void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
|
||||
// The range splice() doesn't allow noop moves, but this one does.
|
||||
if (Where != From)
|
||||
splice(Where, Other, From, llvm::next(From));
|
||||
}
|
||||
|
||||
/// Take a block of instructions from MBB 'Other' in the range [From, To),
|
||||
/// and insert them into this MBB right before 'Where'.
|
||||
///
|
||||
/// The instruction at 'Where' must not be included in the range of
|
||||
/// instructions to move.
|
||||
void splice(iterator Where, MachineBasicBlock *Other,
|
||||
iterator From, iterator To) {
|
||||
Insts.splice(Where.getInstrIterator(), Other->Insts,
|
||||
From.getInstrIterator(), To.getInstrIterator());
|
||||
}
|
||||
|
||||
/// removeFromParent - This method unlinks 'this' from the containing
|
||||
/// function, and returns it, but does not delete it.
|
||||
MachineBasicBlock *removeFromParent();
|
||||
|
||||
/// eraseFromParent - This method unlinks 'this' from the containing
|
||||
/// function and deletes it.
|
||||
void eraseFromParent();
|
||||
|
||||
/// ReplaceUsesOfBlockWith - Given a machine basic block that branched to
|
||||
/// 'Old', change the code and CFG so that it branches to 'New' instead.
|
||||
void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
|
||||
|
||||
/// CorrectExtraCFGEdges - Various pieces of code can cause excess edges in
|
||||
/// the CFG to be inserted. If we have proven that MBB can only branch to
|
||||
/// DestA and DestB, remove any other MBB successors from the CFG. DestA and
|
||||
/// DestB can be null. Besides DestA and DestB, retain other edges leading
|
||||
/// to LandingPads (currently there can be only one; we don't check or require
|
||||
/// that here). Note it is possible that DestA and/or DestB are LandingPads.
|
||||
bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
|
||||
MachineBasicBlock *DestB,
|
||||
bool isCond);
|
||||
|
||||
/// findDebugLoc - find the next valid DebugLoc starting at MBBI, skipping
|
||||
/// any DBG_VALUE instructions. Return UnknownLoc if there is none.
|
||||
DebugLoc findDebugLoc(instr_iterator MBBI);
|
||||
DebugLoc findDebugLoc(iterator MBBI) {
|
||||
return findDebugLoc(MBBI.getInstrIterator());
|
||||
}
|
||||
|
||||
/// Possible outcome of a register liveness query to computeRegisterLiveness()
|
||||
enum LivenessQueryResult {
|
||||
LQR_Live, ///< Register is known to be live.
|
||||
LQR_OverlappingLive, ///< Register itself is not live, but some overlapping
|
||||
///< register is.
|
||||
LQR_Dead, ///< Register is known to be dead.
|
||||
LQR_Unknown ///< Register liveness not decidable from local
|
||||
///< neighborhood.
|
||||
};
|
||||
|
||||
/// computeRegisterLiveness - Return whether (physical) register \c Reg
|
||||
/// has been <def>ined and not <kill>ed as of just before \c MI.
|
||||
///
|
||||
/// Search is localised to a neighborhood of
|
||||
/// \c Neighborhood instructions before (searching for defs or kills) and
|
||||
/// Neighborhood instructions after (searching just for defs) MI.
|
||||
///
|
||||
/// \c Reg must be a physical register.
|
||||
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
|
||||
unsigned Reg, MachineInstr *MI,
|
||||
unsigned Neighborhood=10);
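// Illustrative sketch (not part of the original header): deciding whether a
// physical scratch register can be clobbered right before MI, treating an
// inconclusive local search as "not safe". TRI, MI and ScratchReg are assumed
// to be in scope:
//
//   MachineBasicBlock::LivenessQueryResult LQR =
//     MBB->computeRegisterLiveness(TRI, ScratchReg, MI);
//   bool CanClobber = (LQR == MachineBasicBlock::LQR_Dead);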
|
||||
|
||||
// Debugging methods.
|
||||
void dump() const;
|
||||
void print(raw_ostream &OS, SlotIndexes* = 0) const;
|
||||
|
||||
/// getNumber - MachineBasicBlocks are uniquely numbered at the function
|
||||
/// level, unless they're not in a MachineFunction yet, in which case this
|
||||
/// will return -1.
|
||||
///
|
||||
int getNumber() const { return Number; }
|
||||
void setNumber(int N) { Number = N; }
|
||||
|
||||
/// getSymbol - Return the MCSymbol for this basic block.
|
||||
///
|
||||
MCSymbol *getSymbol() const;
|
||||
|
||||
|
||||
private:
|
||||
/// getWeightIterator - Return weight iterator corresponding to the I
|
||||
/// successor iterator.
|
||||
weight_iterator getWeightIterator(succ_iterator I);
|
||||
const_weight_iterator getWeightIterator(const_succ_iterator I) const;
|
||||
|
||||
friend class MachineBranchProbabilityInfo;
|
||||
|
||||
/// getSuccWeight - Return weight of the edge from this block to MBB. This
|
||||
/// method should NOT be called directly, but by using getEdgeWeight method
|
||||
/// from MachineBranchProbabilityInfo class.
|
||||
uint32_t getSuccWeight(const_succ_iterator Succ) const;
|
||||
|
||||
|
||||
// Methods used to maintain doubly linked list of blocks...
|
||||
friend struct ilist_traits<MachineBasicBlock>;
|
||||
|
||||
// Machine-CFG mutators
|
||||
|
||||
/// addPredecessor - Add pred as a predecessor of this MachineBasicBlock.
/// Don't do this unless you know what you're doing, because it doesn't
/// update pred's successors list. Use pred->addSuccessor instead.
|
||||
///
|
||||
void addPredecessor(MachineBasicBlock *pred);
|
||||
|
||||
/// removePredecessor - Remove pred as a predecessor of this
|
||||
/// MachineBasicBlock. Don't do this unless you know what you're
|
||||
/// doing, because it doesn't update pred's successors list. Use
|
||||
/// pred->removeSuccessor instead.
|
||||
///
|
||||
void removePredecessor(MachineBasicBlock *pred);
|
||||
};
|
||||
|
||||
raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
|
||||
|
||||
void WriteAsOperand(raw_ostream &, const MachineBasicBlock*, bool t);
|
||||
|
||||
// This is useful when building IndexedMaps keyed on basic block pointers.
|
||||
struct MBB2NumberFunctor :
|
||||
public std::unary_function<const MachineBasicBlock*, unsigned> {
|
||||
unsigned operator()(const MachineBasicBlock *MBB) const {
|
||||
return MBB->getNumber();
|
||||
}
|
||||
};
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// GraphTraits specializations for machine basic block graphs (machine-CFGs)
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
// Provide specializations of GraphTraits to be able to treat a
|
||||
// MachineFunction as a graph of MachineBasicBlocks...
|
||||
//
|
||||
|
||||
template <> struct GraphTraits<MachineBasicBlock *> {
|
||||
typedef MachineBasicBlock NodeType;
|
||||
typedef MachineBasicBlock::succ_iterator ChildIteratorType;
|
||||
|
||||
static NodeType *getEntryNode(MachineBasicBlock *BB) { return BB; }
|
||||
static inline ChildIteratorType child_begin(NodeType *N) {
|
||||
return N->succ_begin();
|
||||
}
|
||||
static inline ChildIteratorType child_end(NodeType *N) {
|
||||
return N->succ_end();
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<const MachineBasicBlock *> {
|
||||
typedef const MachineBasicBlock NodeType;
|
||||
typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
|
||||
|
||||
static NodeType *getEntryNode(const MachineBasicBlock *BB) { return BB; }
|
||||
static inline ChildIteratorType child_begin(NodeType *N) {
|
||||
return N->succ_begin();
|
||||
}
|
||||
static inline ChildIteratorType child_end(NodeType *N) {
|
||||
return N->succ_end();
|
||||
}
|
||||
};
|
||||
|
||||
// Provide specializations of GraphTraits to be able to treat a
|
||||
// MachineFunction as a graph of MachineBasicBlocks... and to walk it
|
||||
// in inverse order. Inverse order for a function is considered
|
||||
// to be when traversing the predecessor edges of a MBB
|
||||
// instead of the successor edges.
|
||||
//
|
||||
template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
|
||||
typedef MachineBasicBlock NodeType;
|
||||
typedef MachineBasicBlock::pred_iterator ChildIteratorType;
|
||||
static NodeType *getEntryNode(Inverse<MachineBasicBlock *> G) {
|
||||
return G.Graph;
|
||||
}
|
||||
static inline ChildIteratorType child_begin(NodeType *N) {
|
||||
return N->pred_begin();
|
||||
}
|
||||
static inline ChildIteratorType child_end(NodeType *N) {
|
||||
return N->pred_end();
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
|
||||
typedef const MachineBasicBlock NodeType;
|
||||
typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
|
||||
static NodeType *getEntryNode(Inverse<const MachineBasicBlock*> G) {
|
||||
return G.Graph;
|
||||
}
|
||||
static inline ChildIteratorType child_begin(NodeType *N) {
|
||||
return N->pred_begin();
|
||||
}
|
||||
static inline ChildIteratorType child_end(NodeType *N) {
|
||||
return N->pred_end();
|
||||
}
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
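The CFG-query API above (successors, predecessors, block numbers) is easiest
to see in combination. A minimal sketch, assuming a MachineFunction is at
hand; the helper name and the use of dbgs() are illustrative, not part of the
header:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

static void dumpCFGEdges(const MachineFunction &MF) {
  for (MachineFunction::const_iterator MBB = MF.begin(), MBBE = MF.end();
       MBB != MBBE; ++MBB) {
    dbgs() << "BB#" << MBB->getNumber() << " ->";
    // Successor lists are kept per block; print each successor's number.
    for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
      dbgs() << " BB#" << (*SI)->getNumber();
    dbgs() << '\n';
  }
}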
56
thirdparty/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
vendored
Normal file
@@ -0,0 +1,56 @@
//====----- MachineBlockFrequencyInfo.h - MachineBlock Frequency Analysis ----====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
#define LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/BlockFrequency.h"
#include <climits>

namespace llvm {

class MachineBasicBlock;
class MachineBranchProbabilityInfo;
template<class BlockT, class FunctionT, class BranchProbInfoT>
class BlockFrequencyImpl;

/// MachineBlockFrequencyInfo pass uses BlockFrequencyImpl implementation to
/// estimate machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass {

  BlockFrequencyImpl<MachineBasicBlock, MachineFunction,
                     MachineBranchProbabilityInfo> *MBFI;

public:
  static char ID;

  MachineBlockFrequencyInfo();

  ~MachineBlockFrequencyInfo();

  void getAnalysisUsage(AnalysisUsage &AU) const;

  bool runOnMachineFunction(MachineFunction &F);

  /// getBlockFreq - Return block frequency. Return 0 if we don't have the
  /// information. Please note that the initial frequency is equal to 1024.
  /// It means that we should not rely on the value itself, but only on the
  /// comparison to the other block frequencies. We do this to avoid using
  /// floating point arithmetic.
  ///
  BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
};

}

#endif
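Because getBlockFreq() is only meaningful relative to other blocks of the same
function (the initial frequency is 1024), a typical query compares two
frequencies rather than inspecting one in isolation. A minimal sketch; the
helper name is illustrative and MBFI is assumed to come from
getAnalysis<MachineBlockFrequencyInfo>():

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
using namespace llvm;

static bool isHotterThan(const MachineBlockFrequencyInfo &MBFI,
                         const MachineBasicBlock *A,
                         const MachineBasicBlock *B) {
  // BlockFrequency wraps a scaled integer; only the comparison is meaningful.
  return MBFI.getBlockFreq(A).getFrequency() >
         MBFI.getBlockFreq(B).getFrequency();
}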
88
thirdparty/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
vendored
Normal file
@@ -0,0 +1,88 @@
//==- MachineBranchProbabilityInfo.h - Machine Branch Probability Analysis -==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities on machine basic blocks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include <climits>

namespace llvm {

class MachineBranchProbabilityInfo : public ImmutablePass {
  virtual void anchor();

  // Default weight value. Used when we don't have information about the edge.
  // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
  // the successors have a weight yet. But it doesn't make sense when providing
  // a weight to an edge that may have siblings with non-zero weights. This can
  // be handled various ways, but it's probably fine for an edge with unknown
  // weight to just "inherit" the non-zero weight of an adjacent successor.
  static const uint32_t DEFAULT_WEIGHT = 16;

public:
  static char ID;

  MachineBranchProbabilityInfo() : ImmutablePass(ID) {
    PassRegistry &Registry = *PassRegistry::getPassRegistry();
    initializeMachineBranchProbabilityInfoPass(Registry);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesAll();
  }

  // Return edge weight. If we don't have any information about the edge,
  // return DEFAULT_WEIGHT.
  uint32_t getEdgeWeight(const MachineBasicBlock *Src,
                         const MachineBasicBlock *Dst) const;

  // Same thing, but using a const_succ_iterator from Src. This is faster when
  // the iterator is already available.
  uint32_t getEdgeWeight(const MachineBasicBlock *Src,
                         MachineBasicBlock::const_succ_iterator Dst) const;

  // Get the sum of the block successors' weights, potentially scaling them to
  // fit within 32 bits. If scaling is required, sets Scale based on the
  // necessary adjustment. Any edge weights used with the sum should be divided
  // by Scale.
  uint32_t getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const;

  // A 'Hot' edge is an edge whose probability is >= 80%.
  bool isEdgeHot(MachineBasicBlock *Src, MachineBasicBlock *Dst) const;

  // Return a hot successor for the block BB or null if there isn't one.
  // NB: This routine's complexity is linear on the number of successors.
  MachineBasicBlock *getHotSucc(MachineBasicBlock *MBB) const;

  // Return a probability as a fraction between 0 (0% probability) and
  // 1 (100% probability). The value is never equal to 0, and can be 1 only if
  // the SRC block has a single successor.
  // NB: This routine's complexity is linear on the number of successors of
  // Src. Querying sequentially for each successor's probability is a quadratic
  // query pattern.
  BranchProbability getEdgeProbability(MachineBasicBlock *Src,
                                       MachineBasicBlock *Dst) const;

  // Print a value between 0 (0% probability) and 1 (100% probability). The
  // value is never equal to 0, and can be 1 only if the SRC block has a single
  // successor.
  raw_ostream &printEdgeProbability(raw_ostream &OS, MachineBasicBlock *Src,
                                    MachineBasicBlock *Dst) const;
};

}


#endif
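The weight-based queries above combine into a probability in the obvious way;
getEdgeProbability() does essentially this for you. A minimal sketch, assuming
Src and Dst belong to the analysed function (the helper name is illustrative):

#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/Support/BranchProbability.h"
using namespace llvm;

static BranchProbability probFromWeights(const MachineBranchProbabilityInfo &MBPI,
                                         const MachineBasicBlock *Src,
                                         const MachineBasicBlock *Dst) {
  uint32_t Scale = 1;
  uint32_t Sum = MBPI.getSumForBlock(Src, Scale);       // may set Scale > 1
  uint32_t Weight = MBPI.getEdgeWeight(Src, Dst) / Scale;
  return BranchProbability(Weight, Sum);                // Weight / Sum
}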
334
thirdparty/clang/include/llvm/CodeGen/MachineCodeEmitter.h
vendored
Normal file
@@ -0,0 +1,334 @@
|
||||
//===-- llvm/CodeGen/MachineCodeEmitter.h - Code emission -------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines an abstract interface that is used by the machine code
|
||||
// emission framework to output the code. This allows machine code emission to
|
||||
// be separated from concerns such as resolution of call targets, and where the
|
||||
// machine code will be written (memory or disk, f.e.).
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINECODEEMITTER_H
|
||||
#define LLVM_CODEGEN_MACHINECODEEMITTER_H
|
||||
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include "llvm/Support/DebugLoc.h"
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MachineBasicBlock;
|
||||
class MachineConstantPool;
|
||||
class MachineJumpTableInfo;
|
||||
class MachineFunction;
|
||||
class MachineModuleInfo;
|
||||
class MachineRelocation;
|
||||
class Value;
|
||||
class GlobalValue;
|
||||
class Function;
|
||||
class MCSymbol;
|
||||
|
||||
/// MachineCodeEmitter - This class defines two sorts of methods: those for
|
||||
/// emitting the actual bytes of machine code, and those for emitting auxiliary
|
||||
/// structures, such as jump tables, relocations, etc.
|
||||
///
|
||||
/// Emission of machine code is complicated by the fact that we don't (in
|
||||
/// general) know the size of the machine code that we're about to emit before
|
||||
/// we emit it. As such, we preallocate a certain amount of memory, and set the
|
||||
/// BufferBegin/BufferEnd pointers to the start and end of the buffer. As we
|
||||
/// emit machine instructions, we advance the CurBufferPtr to indicate the
|
||||
/// location of the next byte to emit. In the case of a buffer overflow (we
|
||||
/// need to emit more machine code than we have allocated space for), the
|
||||
/// CurBufferPtr will saturate to BufferEnd and ignore stores. Once the entire
|
||||
/// function has been emitted, the overflow condition is checked, and if it has
|
||||
/// occurred, more memory is allocated, and we reemit the code into it.
|
||||
///
|
||||
class MachineCodeEmitter {
|
||||
virtual void anchor();
|
||||
protected:
|
||||
/// BufferBegin/BufferEnd - Pointers to the start and end of the memory
|
||||
/// allocated for this code buffer.
|
||||
uint8_t *BufferBegin, *BufferEnd;
|
||||
/// CurBufferPtr - Pointer to the next byte of memory to fill when emitting
|
||||
/// code. This is guaranteed to be in the range [BufferBegin,BufferEnd]. If
|
||||
/// this pointer is at BufferEnd, it will never move due to code emission, and
|
||||
/// all code emission requests will be ignored (this is the buffer overflow
|
||||
/// condition).
|
||||
uint8_t *CurBufferPtr;
|
||||
|
||||
public:
|
||||
virtual ~MachineCodeEmitter() {}
|
||||
|
||||
/// startFunction - This callback is invoked when the specified function is
|
||||
/// about to be code generated. This initializes the BufferBegin/End/Ptr
|
||||
/// fields.
|
||||
///
|
||||
virtual void startFunction(MachineFunction &F) = 0;
|
||||
|
||||
/// finishFunction - This callback is invoked when the specified function has
|
||||
/// finished code generation. If a buffer overflow has occurred, this method
|
||||
/// returns true (the callee is required to try again), otherwise it returns
|
||||
/// false.
|
||||
///
|
||||
virtual bool finishFunction(MachineFunction &F) = 0;
|
||||
|
||||
/// emitByte - This callback is invoked when a byte needs to be written to the
|
||||
/// output stream.
|
||||
///
|
||||
void emitByte(uint8_t B) {
|
||||
if (CurBufferPtr != BufferEnd)
|
||||
*CurBufferPtr++ = B;
|
||||
}
|
||||
|
||||
/// emitWordLE - This callback is invoked when a 32-bit word needs to be
|
||||
/// written to the output stream in little-endian format.
|
||||
///
|
||||
void emitWordLE(uint32_t W) {
|
||||
if (4 <= BufferEnd-CurBufferPtr) {
|
||||
emitWordLEInto(CurBufferPtr, W);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitWordLEInto - This callback is invoked when a 32-bit word needs to be
|
||||
/// written to an arbitrary buffer in little-endian format. Buf must have at
|
||||
/// least 4 bytes of available space.
|
||||
///
|
||||
static void emitWordLEInto(uint8_t *&Buf, uint32_t W) {
|
||||
*Buf++ = (uint8_t)(W >> 0);
|
||||
*Buf++ = (uint8_t)(W >> 8);
|
||||
*Buf++ = (uint8_t)(W >> 16);
|
||||
*Buf++ = (uint8_t)(W >> 24);
|
||||
}
|
||||
|
||||
/// emitWordBE - This callback is invoked when a 32-bit word needs to be
|
||||
/// written to the output stream in big-endian format.
|
||||
///
|
||||
void emitWordBE(uint32_t W) {
|
||||
if (4 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitDWordLE - This callback is invoked when a 64-bit word needs to be
|
||||
/// written to the output stream in little-endian format.
|
||||
///
|
||||
void emitDWordLE(uint64_t W) {
|
||||
if (8 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 32);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 40);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 48);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 56);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitDWordBE - This callback is invoked when a 64-bit word needs to be
|
||||
/// written to the output stream in big-endian format.
|
||||
///
|
||||
void emitDWordBE(uint64_t W) {
|
||||
if (8 <= BufferEnd-CurBufferPtr) {
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 56);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 48);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 40);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 32);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 24);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 16);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 8);
|
||||
*CurBufferPtr++ = (uint8_t)(W >> 0);
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitAlignment - Move the CurBufferPtr pointer up to the specified
|
||||
/// alignment (saturated to BufferEnd of course).
|
||||
void emitAlignment(unsigned Alignment) {
|
||||
if (Alignment == 0) Alignment = 1;
|
||||
|
||||
if(Alignment <= (uintptr_t)(BufferEnd-CurBufferPtr)) {
|
||||
// Move the current buffer ptr up to the specified alignment.
|
||||
CurBufferPtr =
|
||||
(uint8_t*)(((uintptr_t)CurBufferPtr+Alignment-1) &
|
||||
~(uintptr_t)(Alignment-1));
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
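// Worked example (illustrative): with CurBufferPtr at 0x1003 and
// Alignment == 8, the expression above rounds up to
// (0x1003 + 7) & ~7 == 0x1008, i.e. the next 8-byte boundary.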
|
||||
|
||||
|
||||
/// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
|
||||
/// written to the output stream.
|
||||
void emitULEB128Bytes(uint64_t Value) {
|
||||
do {
|
||||
uint8_t Byte = Value & 0x7f;
|
||||
Value >>= 7;
|
||||
if (Value) Byte |= 0x80;
|
||||
emitByte(Byte);
|
||||
} while (Value);
|
||||
}
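// Worked example (illustrative): emitULEB128Bytes(624485) walks the value
// seven bits at a time, low bits first, and emits 0xE5 0x8E 0x26 -- the
// standard DWARF encoding of 624485.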
|
||||
|
||||
/// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
|
||||
/// written to the output stream.
|
||||
void emitSLEB128Bytes(uint64_t Value) {
|
||||
uint64_t Sign = Value >> (8 * sizeof(Value) - 1);
|
||||
bool IsMore;
|
||||
|
||||
do {
|
||||
uint8_t Byte = Value & 0x7f;
|
||||
Value >>= 7;
|
||||
IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
|
||||
if (IsMore) Byte |= 0x80;
|
||||
emitByte(Byte);
|
||||
} while (IsMore);
|
||||
}
|
||||
|
||||
/// emitString - This callback is invoked when a String needs to be
|
||||
/// written to the output stream.
|
||||
void emitString(const std::string &String) {
|
||||
for (unsigned i = 0, N = static_cast<unsigned>(String.size());
|
||||
i < N; ++i) {
|
||||
uint8_t C = String[i];
|
||||
emitByte(C);
|
||||
}
|
||||
emitByte(0);
|
||||
}
|
||||
|
||||
/// emitInt32 - Emit an int32 directive.
|
||||
void emitInt32(int32_t Value) {
|
||||
if (4 <= BufferEnd-CurBufferPtr) {
|
||||
*((uint32_t*)CurBufferPtr) = Value;
|
||||
CurBufferPtr += 4;
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitInt64 - Emit an int64 directive.
|
||||
void emitInt64(uint64_t Value) {
|
||||
if (8 <= BufferEnd-CurBufferPtr) {
|
||||
*((uint64_t*)CurBufferPtr) = Value;
|
||||
CurBufferPtr += 8;
|
||||
} else {
|
||||
CurBufferPtr = BufferEnd;
|
||||
}
|
||||
}
|
||||
|
||||
/// emitInt32At - Emit the Int32 Value in Addr.
|
||||
void emitInt32At(uintptr_t *Addr, uintptr_t Value) {
|
||||
if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
|
||||
(*(uint32_t*)Addr) = (uint32_t)Value;
|
||||
}
|
||||
|
||||
/// emitInt64At - Emit the Int64 Value in Addr.
|
||||
void emitInt64At(uintptr_t *Addr, uintptr_t Value) {
|
||||
if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
|
||||
(*(uint64_t*)Addr) = (uint64_t)Value;
|
||||
}
|
||||
|
||||
/// processDebugLoc - Records debug location information about a
|
||||
/// MachineInstruction. This is called before emitting any bytes associated
|
||||
/// with the instruction. Even if successive instructions have the same debug
|
||||
/// location, this method will be called for each one.
|
||||
virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {}
|
||||
|
||||
/// emitLabel - Emits a label
|
||||
virtual void emitLabel(MCSymbol *Label) = 0;
|
||||
|
||||
/// allocateSpace - Allocate a block of space in the current output buffer,
|
||||
/// returning null (and setting conditions to indicate buffer overflow) on
|
||||
/// failure. Alignment is the alignment in bytes of the buffer desired.
|
||||
virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) {
|
||||
emitAlignment(Alignment);
|
||||
void *Result;
|
||||
|
||||
// Check for buffer overflow.
|
||||
if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
|
||||
CurBufferPtr = BufferEnd;
|
||||
Result = 0;
|
||||
} else {
|
||||
// Allocate the space.
|
||||
Result = CurBufferPtr;
|
||||
CurBufferPtr += Size;
|
||||
}
|
||||
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// StartMachineBasicBlock - This should be called by the target when a new
|
||||
/// basic block is about to be emitted. This way the MCE knows where the
|
||||
/// start of the block is, and can implement getMachineBasicBlockAddress.
|
||||
virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) = 0;
|
||||
|
||||
/// getCurrentPCValue - This returns the address that the next emitted byte
|
||||
/// will be output to.
|
||||
///
|
||||
virtual uintptr_t getCurrentPCValue() const {
|
||||
return (uintptr_t)CurBufferPtr;
|
||||
}
|
||||
|
||||
/// getCurrentPCOffset - Return the offset from the start of the emitted
|
||||
/// buffer that we are currently writing to.
|
||||
virtual uintptr_t getCurrentPCOffset() const {
|
||||
return CurBufferPtr-BufferBegin;
|
||||
}
|
||||
|
||||
/// earlyResolveAddresses - True if the code emitter can use symbol addresses
|
||||
/// during code emission time. The JIT is capable of doing this because it
|
||||
/// creates jump tables or constant pools in memory on the fly while the
|
||||
/// object code emitters rely on a linker to have real addresses and should
|
||||
/// use relocations instead.
|
||||
virtual bool earlyResolveAddresses() const = 0;
|
||||
|
||||
/// addRelocation - Whenever a relocatable address is needed, it should be
|
||||
/// noted with this interface.
|
||||
virtual void addRelocation(const MachineRelocation &MR) = 0;
|
||||
|
||||
/// FIXME: These should all be handled with relocations!
|
||||
|
||||
/// getConstantPoolEntryAddress - Return the address of the 'Index' entry in
|
||||
/// the constant pool that was last emitted with the emitConstantPool method.
|
||||
///
|
||||
virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const = 0;
|
||||
|
||||
/// getJumpTableEntryAddress - Return the address of the jump table with index
|
||||
/// 'Index' in the function that last called initJumpTableInfo.
|
||||
///
|
||||
virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const = 0;
|
||||
|
||||
/// getMachineBasicBlockAddress - Return the address of the specified
|
||||
/// MachineBasicBlock, only usable after the label for the MBB has been
|
||||
/// emitted.
|
||||
///
|
||||
virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0;
|
||||
|
||||
/// getLabelAddress - Return the address of the specified Label, only usable
|
||||
/// after the LabelID has been emitted.
|
||||
///
|
||||
virtual uintptr_t getLabelAddress(MCSymbol *Label) const = 0;
|
||||
|
||||
/// Specifies the MachineModuleInfo object. This is used for exception handling
|
||||
/// purposes.
|
||||
virtual void setModuleInfo(MachineModuleInfo* Info) = 0;
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
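The saturating-buffer scheme described in MachineCodeEmitter's class comment
implies a simple driver loop: emit, and if finishFunction() reports overflow,
let it grow the buffer and emit again. A minimal sketch under that assumption;
emitFunctionBody() is a hypothetical stand-in for the target's per-instruction
emission, not a real API:

#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
using namespace llvm;

// Hypothetical: walks MF and writes its bytes through MCE.
static void emitFunctionBody(MachineCodeEmitter &MCE, MachineFunction &MF);

static void emitWithRetry(MachineCodeEmitter &MCE, MachineFunction &MF) {
  do {
    MCE.startFunction(MF);          // (re)initializes BufferBegin/CurBufferPtr/BufferEnd
    emitFunctionBody(MCE, MF);      // writes bytes; saturates at BufferEnd on overflow
  } while (MCE.finishFunction(MF)); // true means overflow: a larger buffer is tried
}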
53
thirdparty/clang/include/llvm/CodeGen/MachineCodeInfo.h
vendored
Normal file
@@ -0,0 +1,53 @@
//===-- MachineCodeInfo.h - Class used to report JIT info -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines MachineCodeInfo, a class used by the JIT ExecutionEngine
// to report information about the generated machine code.
//
// See JIT::runJITOnFunction for usage.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINECODEINFO_H
#define LLVM_CODEGEN_MACHINECODEINFO_H

#include "llvm/Support/DataTypes.h"

namespace llvm {

class MachineCodeInfo {
private:
  size_t Size;   // Number of bytes in memory used
  void *Address; // The address of the function in memory

public:
  MachineCodeInfo() : Size(0), Address(0) {}

  void setSize(size_t s) {
    Size = s;
  }

  void setAddress(void *a) {
    Address = a;
  }

  size_t size() const {
    return Size;
  }

  void *address() const {
    return Address;
  }

};

}

#endif
174
thirdparty/clang/include/llvm/CodeGen/MachineConstantPool.h
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
//===-- CodeGen/MachineConstantPool.h - Abstract Constant Pool --*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
/// @file
|
||||
/// This file declares the MachineConstantPool class which is an abstract
|
||||
/// constant pool to keep track of constants referenced by a function.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
|
||||
#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H
|
||||
|
||||
#include "llvm/ADT/DenseSet.h"
|
||||
#include <cassert>
|
||||
#include <climits>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class Constant;
|
||||
class FoldingSetNodeID;
|
||||
class DataLayout;
|
||||
class TargetMachine;
|
||||
class Type;
|
||||
class MachineConstantPool;
|
||||
class raw_ostream;
|
||||
|
||||
/// Abstract base class for all machine specific constantpool value subclasses.
|
||||
///
|
||||
class MachineConstantPoolValue {
|
||||
virtual void anchor();
|
||||
Type *Ty;
|
||||
|
||||
public:
|
||||
explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
|
||||
virtual ~MachineConstantPoolValue() {}
|
||||
|
||||
/// getType - get type of this MachineConstantPoolValue.
|
||||
///
|
||||
Type *getType() const { return Ty; }
|
||||
|
||||
|
||||
/// getRelocationInfo - This method classifies the entry according to
|
||||
/// whether or not it may generate a relocation entry. This must be
|
||||
/// conservative, so if it might codegen to a relocatable entry, it should say
|
||||
/// so. The return values are the same as Constant::getRelocationInfo().
|
||||
virtual unsigned getRelocationInfo() const = 0;
|
||||
|
||||
virtual int getExistingMachineCPValue(MachineConstantPool *CP,
|
||||
unsigned Alignment) = 0;
|
||||
|
||||
virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;
|
||||
|
||||
/// print - Implement operator<<
|
||||
virtual void print(raw_ostream &O) const = 0;
|
||||
};
|
||||
|
||||
inline raw_ostream &operator<<(raw_ostream &OS,
|
||||
const MachineConstantPoolValue &V) {
|
||||
V.print(OS);
|
||||
return OS;
|
||||
}
|
||||
|
||||
|
||||
/// This class is a data container for one entry in a MachineConstantPool.
|
||||
/// It contains a pointer to the value and an offset from the start of
|
||||
/// the constant pool.
|
||||
/// @brief An entry in a MachineConstantPool
|
||||
class MachineConstantPoolEntry {
|
||||
public:
|
||||
/// The constant itself.
|
||||
union {
|
||||
const Constant *ConstVal;
|
||||
MachineConstantPoolValue *MachineCPVal;
|
||||
} Val;
|
||||
|
||||
/// The required alignment for this entry. The top bit is set when Val is
|
||||
/// a target specific MachineConstantPoolValue.
|
||||
unsigned Alignment;
|
||||
|
||||
MachineConstantPoolEntry(const Constant *V, unsigned A)
|
||||
: Alignment(A) {
|
||||
Val.ConstVal = V;
|
||||
}
|
||||
MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A)
|
||||
: Alignment(A) {
|
||||
Val.MachineCPVal = V;
|
||||
Alignment |= 1U << (sizeof(unsigned)*CHAR_BIT-1);
|
||||
}
|
||||
|
||||
/// isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry
|
||||
/// is indeed a target specific constantpool entry, not a wrapper over a
|
||||
/// Constant.
|
||||
bool isMachineConstantPoolEntry() const {
|
||||
return (int)Alignment < 0;
|
||||
}
|
||||
|
||||
int getAlignment() const {
|
||||
return Alignment & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
|
||||
}
|
||||
|
||||
Type *getType() const;
|
||||
|
||||
/// getRelocationInfo - This method classifies the entry according to
|
||||
/// whether or not it may generate a relocation entry. This must be
|
||||
/// conservative, so if it might codegen to a relocatable entry, it should say
|
||||
/// so. The return values are:
|
||||
///
|
||||
/// 0: This constant pool entry is guaranteed to never have a relocation
|
||||
/// applied to it (because it holds a simple constant like '4').
|
||||
/// 1: This entry has relocations, but the entries are guaranteed to be
|
||||
/// resolvable by the static linker, so the dynamic linker will never see
|
||||
/// them.
|
||||
/// 2: This entry may have arbitrary relocations.
|
||||
unsigned getRelocationInfo() const;
|
||||
};
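// Worked example (illustrative, 32-bit unsigned): an entry created from a
// MachineConstantPoolValue with 8-byte alignment stores Alignment ==
// 0x80000008; isMachineConstantPoolEntry() sees the sign bit and returns
// true, while getAlignment() masks it off and returns 8.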
|
||||
|
||||
/// The MachineConstantPool class keeps track of constants referenced by a
|
||||
/// function which must be spilled to memory. This is used for constants which
|
||||
/// are unable to be used directly as operands to instructions, which typically
|
||||
/// include floating point and large integer constants.
|
||||
///
|
||||
/// Instructions reference the address of these constant pool constants through
|
||||
/// the use of MO_ConstantPoolIndex values. When emitting assembly or machine
|
||||
/// code, these virtual address references are converted to refer to the
|
||||
/// address of the function constant pool values.
|
||||
/// @brief The machine constant pool.
|
||||
class MachineConstantPool {
|
||||
const DataLayout *TD; ///< The machine's DataLayout.
|
||||
unsigned PoolAlignment; ///< The alignment for the pool.
|
||||
std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
|
||||
/// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
|
||||
DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
|
||||
public:
|
||||
/// @brief The only constructor.
|
||||
explicit MachineConstantPool(const DataLayout *td)
|
||||
: TD(td), PoolAlignment(1) {}
|
||||
~MachineConstantPool();
|
||||
|
||||
/// getConstantPoolAlignment - Return the alignment required by
|
||||
/// the whole constant pool, of which the first element must be aligned.
|
||||
unsigned getConstantPoolAlignment() const { return PoolAlignment; }
|
||||
|
||||
/// getConstantPoolIndex - Create a new entry in the constant pool or return
|
||||
/// an existing one. User must specify the minimum required alignment for
|
||||
/// the object.
|
||||
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
|
||||
unsigned getConstantPoolIndex(MachineConstantPoolValue *V,unsigned Alignment);
|
||||
|
||||
/// isEmpty - Return true if this constant pool contains no constants.
|
||||
bool isEmpty() const { return Constants.empty(); }
|
||||
|
||||
const std::vector<MachineConstantPoolEntry> &getConstants() const {
|
||||
return Constants;
|
||||
}
|
||||
|
||||
/// print - Used by the MachineFunction printer to print information about
|
||||
/// constant pool objects. Implemented in MachineFunction.cpp
|
||||
///
|
||||
void print(raw_ostream &OS) const;
|
||||
|
||||
/// dump - Call print(cerr) to be called from the debugger.
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
205
thirdparty/clang/include/llvm/CodeGen/MachineDominators.h
vendored
Normal file
@@ -0,0 +1,205 @@
|
||||
//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
|
||||
// but for target-specific code rather than target-independent IR.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
|
||||
#define LLVM_CODEGEN_MACHINEDOMINATORS_H
|
||||
|
||||
#include "llvm/Analysis/DominatorInternals.h"
|
||||
#include "llvm/Analysis/Dominators.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunction.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
template<>
|
||||
inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB) {
|
||||
this->Roots.push_back(MBB);
|
||||
}
|
||||
|
||||
EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>);
|
||||
EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>);
|
||||
|
||||
typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
|
||||
|
||||
//===-------------------------------------
|
||||
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
|
||||
/// compute a normal dominator tree.
|
||||
///
|
||||
class MachineDominatorTree : public MachineFunctionPass {
|
||||
public:
|
||||
static char ID; // Pass ID, replacement for typeid
|
||||
DominatorTreeBase<MachineBasicBlock>* DT;
|
||||
|
||||
MachineDominatorTree();
|
||||
|
||||
~MachineDominatorTree();
|
||||
|
||||
DominatorTreeBase<MachineBasicBlock>& getBase() { return *DT; }
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
|
||||
/// getRoots - Return the root blocks of the current CFG. This may include
|
||||
/// multiple blocks if we are computing post dominators. For forward
|
||||
/// dominators, this will always be a single block (the entry node).
|
||||
///
|
||||
inline const std::vector<MachineBasicBlock*> &getRoots() const {
|
||||
return DT->getRoots();
|
||||
}
|
||||
|
||||
inline MachineBasicBlock *getRoot() const {
|
||||
return DT->getRoot();
|
||||
}
|
||||
|
||||
inline MachineDomTreeNode *getRootNode() const {
|
||||
return DT->getRootNode();
|
||||
}
|
||||
|
||||
virtual bool runOnMachineFunction(MachineFunction &F);
|
||||
|
||||
inline bool dominates(const MachineDomTreeNode* A,
|
||||
const MachineDomTreeNode* B) const {
|
||||
return DT->dominates(A, B);
|
||||
}
|
||||
|
||||
inline bool dominates(const MachineBasicBlock* A,
|
||||
const MachineBasicBlock* B) const {
|
||||
return DT->dominates(A, B);
|
||||
}
|
||||
|
||||
// dominates - Return true if A dominates B. This performs the
|
||||
// special checks necessary if A and B are in the same basic block.
|
||||
bool dominates(const MachineInstr *A, const MachineInstr *B) const {
|
||||
const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
|
||||
if (BBA != BBB) return DT->dominates(BBA, BBB);
|
||||
|
||||
// Loop through the basic block until we find A or B.
|
||||
MachineBasicBlock::const_iterator I = BBA->begin();
|
||||
for (; &*I != A && &*I != B; ++I)
|
||||
/*empty*/ ;
|
||||
|
||||
//if(!DT.IsPostDominators) {
|
||||
// A dominates B if it is found first in the basic block.
|
||||
return &*I == A;
|
||||
//} else {
|
||||
// // A post-dominates B if B is found first in the basic block.
|
||||
// return &*I == B;
|
||||
//}
|
||||
}
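// Illustrative sketch (not part of the original header): a sinking or
// rematerialization pass typically guards code motion with this query,
// assuming DefMI and UseMI are instructions of the analysed function and DT
// is a MachineDominatorTree*:
//
//   if (DT->dominates(DefMI, UseMI))
//     ; // every path to UseMI passes through DefMI, so the value is available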
|
||||
|
||||
inline bool properlyDominates(const MachineDomTreeNode* A,
|
||||
const MachineDomTreeNode* B) const {
|
||||
return DT->properlyDominates(A, B);
|
||||
}
|
||||
|
||||
inline bool properlyDominates(const MachineBasicBlock* A,
|
||||
const MachineBasicBlock* B) const {
|
||||
return DT->properlyDominates(A, B);
|
||||
}
|
||||
|
||||
/// findNearestCommonDominator - Find nearest common dominator basic block
|
||||
/// for basic block A and B. If there is no such block then return NULL.
|
||||
inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
|
||||
MachineBasicBlock *B) {
|
||||
return DT->findNearestCommonDominator(A, B);
|
||||
}
|
||||
|
||||
inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
|
||||
return DT->getNode(BB);
|
||||
}
|
||||
|
||||
/// getNode - return the (Post)DominatorTree node for the specified basic
|
||||
/// block. This is the same as using operator[] on this class.
|
||||
///
|
||||
inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
|
||||
return DT->getNode(BB);
|
||||
}
|
||||
|
||||
/// addNewBlock - Add a new node to the dominator tree information. This
|
||||
/// creates a new node as a child of the DomBB dominator node, linking it into
|
||||
/// the children list of the immediate dominator.
|
||||
inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
|
||||
MachineBasicBlock *DomBB) {
|
||||
return DT->addNewBlock(BB, DomBB);
|
||||
}
|
||||
|
||||
/// changeImmediateDominator - This method is used to update the dominator
|
||||
/// tree information when a node's immediate dominator changes.
|
||||
///
|
||||
inline void changeImmediateDominator(MachineBasicBlock *N,
|
||||
MachineBasicBlock* NewIDom) {
|
||||
DT->changeImmediateDominator(N, NewIDom);
|
||||
}
|
||||
|
||||
inline void changeImmediateDominator(MachineDomTreeNode *N,
|
||||
MachineDomTreeNode* NewIDom) {
|
||||
DT->changeImmediateDominator(N, NewIDom);
|
||||
}
|
||||
|
||||
/// eraseNode - Removes a node from the dominator tree. Block must not
|
||||
/// dominate any other blocks. Removes node from its immediate dominator's
|
||||
/// children list. Deletes dominator node associated with basic block BB.
|
||||
inline void eraseNode(MachineBasicBlock *BB) {
|
||||
DT->eraseNode(BB);
|
||||
}
|
||||
|
||||
/// splitBlock - BB is split and now it has one successor. Update dominator
|
||||
/// tree to reflect this change.
|
||||
inline void splitBlock(MachineBasicBlock* NewBB) {
|
||||
DT->splitBlock(NewBB);
|
||||
}
|
||||
|
||||
/// isReachableFromEntry - Return true if A is dominated by the entry
|
||||
/// block of the function containing it.
|
||||
bool isReachableFromEntry(const MachineBasicBlock *A) {
|
||||
return DT->isReachableFromEntry(A);
|
||||
}
|
||||
|
||||
virtual void releaseMemory();
|
||||
|
||||
virtual void print(raw_ostream &OS, const Module*) const;
|
||||
};
|
||||
|
||||
//===-------------------------------------
|
||||
/// DominatorTree GraphTraits specialization so the DominatorTree can be
|
||||
/// iterable by generic graph iterators.
|
||||
///
|
||||
|
||||
template<class T> struct GraphTraits;
|
||||
|
||||
template <> struct GraphTraits<MachineDomTreeNode *> {
|
||||
typedef MachineDomTreeNode NodeType;
|
||||
typedef NodeType::iterator ChildIteratorType;
|
||||
|
||||
static NodeType *getEntryNode(NodeType *N) {
|
||||
return N;
|
||||
}
|
||||
static inline ChildIteratorType child_begin(NodeType* N) {
|
||||
return N->begin();
|
||||
}
|
||||
static inline ChildIteratorType child_end(NodeType* N) {
|
||||
return N->end();
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<MachineDominatorTree*>
|
||||
: public GraphTraits<MachineDomTreeNode *> {
|
||||
static NodeType *getEntryNode(MachineDominatorTree *DT) {
|
||||
return DT->getRootNode();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
564
thirdparty/clang/include/llvm/CodeGen/MachineFrameInfo.h
vendored
Normal file
@@ -0,0 +1,564 @@
|
||||
//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// The file defines the MachineFrameInfo class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
|
||||
#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
|
||||
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include <cassert>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
class raw_ostream;
|
||||
class DataLayout;
|
||||
class TargetRegisterClass;
|
||||
class Type;
|
||||
class MachineFunction;
|
||||
class MachineBasicBlock;
|
||||
class TargetFrameLowering;
|
||||
class BitVector;
|
||||
class Value;
|
||||
class AllocaInst;
|
||||
|
||||
/// The CalleeSavedInfo class tracks the information needed to locate where a
|
||||
/// callee saved register is in the current frame.
|
||||
class CalleeSavedInfo {
|
||||
unsigned Reg;
|
||||
int FrameIdx;
|
||||
|
||||
public:
|
||||
explicit CalleeSavedInfo(unsigned R, int FI = 0)
|
||||
: Reg(R), FrameIdx(FI) {}
|
||||
|
||||
// Accessors.
|
||||
unsigned getReg() const { return Reg; }
|
||||
int getFrameIdx() const { return FrameIdx; }
|
||||
void setFrameIdx(int FI) { FrameIdx = FI; }
|
||||
};
|
||||
|
||||
/// The MachineFrameInfo class represents an abstract stack frame until
|
||||
/// prolog/epilog code is inserted. This class is key to allowing stack frame
|
||||
/// representation optimizations, such as frame pointer elimination. It also
|
||||
/// allows more mundane (but still important) optimizations, such as reordering
|
||||
/// of abstract objects on the stack frame.
|
||||
///
|
||||
/// To support this, the class assigns unique integer identifiers to stack
|
||||
/// objects requested by clients. These identifiers are negative integers for
|
||||
/// fixed stack objects (such as arguments passed on the stack) or nonnegative
|
||||
/// for objects that may be reordered. Instructions which refer to stack
|
||||
/// objects use a special MO_FrameIndex operand to represent these frame
|
||||
/// indexes.
|
||||
///
|
||||
/// Because this class keeps track of all references to the stack frame, it
|
||||
/// knows when a variable sized object is allocated on the stack. This is the
|
||||
/// sole condition which prevents frame pointer elimination, which is an
|
||||
/// important optimization on register-poor architectures. Because original
|
||||
/// variable sized alloca's in the source program are the only source of
|
||||
/// variable sized stack objects, it is safe to decide whether there will be
|
||||
/// any variable sized objects before all stack objects are known (for
|
||||
/// example, register allocator spill code never needs variable sized
|
||||
/// objects).
|
||||
///
|
||||
/// When prolog/epilog code emission is performed, the final stack frame is
|
||||
/// built and the machine instructions are modified to refer to the actual
|
||||
/// stack offsets of the object, eliminating all MO_FrameIndex operands from
|
||||
/// the program.
|
||||
///
|
||||
/// @brief Abstract Stack Frame Information
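// Illustrative sketch (not part of the original header): the split between
// fixed and ordinary frame indexes shows up directly in client code. An
// incoming argument spilled by the ABI lands at a negative index via
// CreateFixedObject(), while a spill slot created by the register allocator
// gets a nonnegative index via CreateStackObject(); both methods are declared
// further down in this class. Either index is then referenced from
// instructions through a frame-index operand, e.g.
// MachineOperand::CreateFI(FrameIdx).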
|
||||
class MachineFrameInfo {
|
||||
|
||||
// StackObject - Represent a single object allocated on the stack.
|
||||
struct StackObject {
|
||||
// SPOffset - The offset of this object from the stack pointer on entry to
|
||||
// the function. This field has no meaning for a variable sized element.
|
||||
int64_t SPOffset;
|
||||
|
||||
// The size of this object on the stack. 0 means a variable sized object,
|
||||
// ~0ULL means a dead object.
|
||||
uint64_t Size;
|
||||
|
||||
// Alignment - The required alignment of this stack slot.
|
||||
unsigned Alignment;
|
||||
|
||||
// isImmutable - If true, the value of the stack object is set before
|
||||
// entering the function and is not modified inside the function. By
|
||||
// default, fixed objects are immutable unless marked otherwise.
|
||||
bool isImmutable;
|
||||
|
||||
// isSpillSlot - If true, the stack object is used as a spill slot. It
|
||||
// cannot alias any other memory objects.
|
||||
bool isSpillSlot;
|
||||
|
||||
// MayNeedSP - If true the stack object triggered the creation of the stack
|
||||
// protector. We should allocate this object right after the stack
|
||||
// protector.
|
||||
bool MayNeedSP;
|
||||
|
||||
/// Alloca - If this stack object originated from an Alloca instruction,
|
||||
/// this value saves the original IR allocation. Can be NULL.
|
||||
const AllocaInst *Alloca;
|
||||
|
||||
// PreAllocated - If true, the object was mapped into the local frame
|
||||
// block and doesn't need additional handling for allocation beyond that.
|
||||
bool PreAllocated;
|
||||
|
||||
StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
|
||||
bool isSS, bool NSP, const AllocaInst *Val)
|
||||
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
|
||||
isSpillSlot(isSS), MayNeedSP(NSP), Alloca(Val), PreAllocated(false) {}
|
||||
};
|
||||
|
||||
/// Objects - The list of stack objects allocated...
|
||||
///
|
||||
std::vector<StackObject> Objects;
|
||||
|
||||
/// NumFixedObjects - This contains the number of fixed objects contained on
|
||||
/// the stack. Because fixed objects are stored at a negative index in the
|
||||
/// Objects list, this is also the index to the 0th object in the list.
|
||||
///
|
||||
unsigned NumFixedObjects;
|
||||
|
||||
/// HasVarSizedObjects - This boolean keeps track of whether any variable
|
||||
/// sized objects have been allocated yet.
|
||||
///
|
||||
bool HasVarSizedObjects;
|
||||
|
||||
/// FrameAddressTaken - This boolean keeps track of whether there is a call
|
||||
/// to builtin \@llvm.frameaddress.
|
||||
bool FrameAddressTaken;
|
||||
|
||||
/// ReturnAddressTaken - This boolean keeps track of whether there is a call
|
||||
/// to builtin \@llvm.returnaddress.
|
||||
bool ReturnAddressTaken;
|
||||
|
||||
/// StackSize - The prolog/epilog code inserter calculates the final stack
|
||||
/// offsets for all of the fixed size objects, updating the Objects list
|
||||
/// above. It then updates StackSize to contain the number of bytes that need
|
||||
/// to be allocated on entry to the function.
|
||||
///
|
||||
uint64_t StackSize;
|
||||
|
||||
/// OffsetAdjustment - The amount that a frame offset needs to be adjusted to
|
||||
/// have the actual offset from the stack/frame pointer. The exact usage of
|
||||
/// this is target-dependent, but it is typically used to adjust between
|
||||
/// SP-relative and FP-relative offsets. E.G., if objects are accessed via
|
||||
/// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
|
||||
/// to the distance between the initial SP and the value in FP. For many
|
||||
/// targets, this value is only used when generating debug info (via
|
||||
/// TargetRegisterInfo::getFrameIndexOffset); when generating code, the
|
||||
/// corresponding adjustments are performed directly.
|
||||
int OffsetAdjustment;
|
||||
|
||||
/// MaxAlignment - The prolog/epilog code inserter may process objects
|
||||
/// that require greater alignment than the default alignment the target
|
||||
/// provides. To handle this, MaxAlignment is set to the maximum alignment
|
||||
/// needed by the objects on the current frame. If this is greater than the
|
||||
/// native alignment maintained by the compiler, dynamic alignment code will
|
||||
/// be needed.
|
||||
///
|
||||
unsigned MaxAlignment;
|
||||
|
||||
/// AdjustsStack - Set to true if this function adjusts the stack -- e.g.,
|
||||
/// when calling another function. This is only valid during and after
|
||||
/// prolog/epilog code insertion.
|
||||
bool AdjustsStack;
|
||||
|
||||
/// HasCalls - Set to true if this function has any function calls.
|
||||
bool HasCalls;
|
||||
|
||||
/// StackProtectorIdx - The frame index for the stack protector.
|
||||
int StackProtectorIdx;
|
||||
|
||||
/// FunctionContextIdx - The frame index for the function context. Used for
|
||||
/// SjLj exceptions.
|
||||
int FunctionContextIdx;
|
||||
|
||||
/// MaxCallFrameSize - This contains the size of the largest call frame if the
|
||||
/// target uses frame setup/destroy pseudo instructions (as defined in the
|
||||
/// TargetFrameInfo class). This information is important for frame pointer
|
||||
/// elimination. It is only valid during and after prolog/epilog code
|
||||
/// insertion.
|
||||
///
|
||||
unsigned MaxCallFrameSize;
|
||||
|
||||
/// CSInfo - The prolog/epilog code inserter fills in this vector with each
|
||||
/// callee saved register saved in the frame. Beyond its use by the prolog/
|
||||
/// epilog code inserter, this data is used for debug info and exception
|
||||
/// handling.
|
||||
std::vector<CalleeSavedInfo> CSInfo;
|
||||
|
||||
/// CSIValid - Has CSInfo been set yet?
|
||||
bool CSIValid;
|
||||
|
||||
/// TargetFrameLowering - Target information about frame layout.
|
||||
///
|
||||
const TargetFrameLowering &TFI;
|
||||
|
||||
/// LocalFrameObjects - References to frame indices which are mapped
|
||||
/// into the local frame allocation block. <FrameIdx, LocalOffset>
|
||||
SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
|
||||
|
||||
/// LocalFrameSize - Size of the pre-allocated local frame block.
|
||||
int64_t LocalFrameSize;
|
||||
|
||||
/// Required alignment of the local object blob, which is the strictest
|
||||
/// alignment of any object in it.
|
||||
unsigned LocalFrameMaxAlign;
|
||||
|
||||
/// Whether the local object blob needs to be allocated together. If not,
|
||||
/// PEI should ignore the isPreAllocated flags on the stack objects and
|
||||
/// just allocate them normally.
|
||||
bool UseLocalStackAllocationBlock;
|
||||
|
||||
/// Whether the "realign-stack" option is on.
|
||||
bool RealignOption;
|
||||
public:
|
||||
explicit MachineFrameInfo(const TargetFrameLowering &tfi, bool RealignOpt)
|
||||
: TFI(tfi), RealignOption(RealignOpt) {
|
||||
StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0;
|
||||
HasVarSizedObjects = false;
|
||||
FrameAddressTaken = false;
|
||||
ReturnAddressTaken = false;
|
||||
AdjustsStack = false;
|
||||
HasCalls = false;
|
||||
StackProtectorIdx = -1;
|
||||
FunctionContextIdx = -1;
|
||||
MaxCallFrameSize = 0;
|
||||
CSIValid = false;
|
||||
LocalFrameSize = 0;
|
||||
LocalFrameMaxAlign = 0;
|
||||
UseLocalStackAllocationBlock = false;
|
||||
}
|
||||
|
||||
/// hasStackObjects - Return true if there are any stack objects in this
|
||||
/// function.
|
||||
///
|
||||
bool hasStackObjects() const { return !Objects.empty(); }
|
||||
|
||||
/// hasVarSizedObjects - This method may be called any time after instruction
|
||||
/// selection is complete to determine if the stack frame for this function
|
||||
/// contains any variable sized objects.
|
||||
///
|
||||
bool hasVarSizedObjects() const { return HasVarSizedObjects; }
|
||||
|
||||
/// getStackProtectorIndex/setStackProtectorIndex - Return the index for the
|
||||
/// stack protector object.
|
||||
///
|
||||
int getStackProtectorIndex() const { return StackProtectorIdx; }
|
||||
void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
|
||||
|
||||
/// getFunctionContextIndex/setFunctionContextIndex - Return the index for the
|
||||
/// function context object. This object is used for SjLj exceptions.
|
||||
int getFunctionContextIndex() const { return FunctionContextIdx; }
|
||||
void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
|
||||
|
||||
/// isFrameAddressTaken - This method may be called any time after instruction
|
||||
/// selection is complete to determine if there is a call to
|
||||
/// \@llvm.frameaddress in this function.
|
||||
bool isFrameAddressTaken() const { return FrameAddressTaken; }
|
||||
void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
|
||||
|
||||
/// isReturnAddressTaken - This method may be called any time after
|
||||
/// instruction selection is complete to determine if there is a call to
|
||||
/// \@llvm.returnaddress in this function.
|
||||
bool isReturnAddressTaken() const { return ReturnAddressTaken; }
|
||||
void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
|
||||
|
||||
/// getObjectIndexBegin - Return the minimum frame object index.
|
||||
///
|
||||
int getObjectIndexBegin() const { return -NumFixedObjects; }
|
||||
|
||||
/// getObjectIndexEnd - Return one past the maximum frame object index.
|
||||
///
|
||||
int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
|
||||
|
||||
/// getNumFixedObjects - Return the number of fixed objects.
|
||||
unsigned getNumFixedObjects() const { return NumFixedObjects; }
|
||||
|
||||
/// getNumObjects - Return the number of objects.
|
||||
///
|
||||
unsigned getNumObjects() const { return Objects.size(); }
|
||||
|
||||
/// mapLocalFrameObject - Map a frame index into the local object block
|
||||
void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
|
||||
LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
|
||||
Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
|
||||
}
|
||||
|
||||
/// getLocalFrameObjectMap - Get the local offset mapping for an object
|
||||
std::pair<int, int64_t> getLocalFrameObjectMap(int i) {
|
||||
assert (i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
|
||||
"Invalid local object reference!");
|
||||
return LocalFrameObjects[i];
|
||||
}
|
||||
|
||||
/// getLocalFrameObjectCount - Return the number of objects allocated into
|
||||
/// the local object block.
|
||||
int64_t getLocalFrameObjectCount() { return LocalFrameObjects.size(); }
|
||||
|
||||
/// setLocalFrameSize - Set the size of the local object blob.
|
||||
void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
|
||||
|
||||
/// getLocalFrameSize - Get the size of the local object blob.
|
||||
int64_t getLocalFrameSize() const { return LocalFrameSize; }
|
||||
|
||||
/// setLocalFrameMaxAlign - Required alignment of the local object blob,
|
||||
/// which is the strictest alignment of any object in it.
|
||||
void setLocalFrameMaxAlign(unsigned Align) { LocalFrameMaxAlign = Align; }
|
||||
|
||||
/// getLocalFrameMaxAlign - Return the required alignment of the local
|
||||
/// object blob.
|
||||
unsigned getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
|
||||
|
||||
/// getUseLocalStackAllocationBlock - Get whether the local allocation blob
|
||||
/// should be allocated together or let PEI allocate the locals in it
|
||||
/// directly.
|
||||
bool getUseLocalStackAllocationBlock() {return UseLocalStackAllocationBlock;}
|
||||
|
||||
/// setUseLocalStackAllocationBlock - Set whether the local allocation blob
|
||||
/// should be allocated together or let PEI allocate the locals in it
|
||||
/// directly.
|
||||
void setUseLocalStackAllocationBlock(bool v) {
|
||||
UseLocalStackAllocationBlock = v;
|
||||
}
|
||||
|
||||
/// isObjectPreAllocated - Return true if the object was pre-allocated into
|
||||
/// the local block.
|
||||
bool isObjectPreAllocated(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
|
||||
}
|
||||
|
||||
/// getObjectSize - Return the size of the specified object.
|
||||
///
|
||||
int64_t getObjectSize(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].Size;
|
||||
}
|
||||
|
||||
/// setObjectSize - Change the size of the specified stack object.
|
||||
void setObjectSize(int ObjectIdx, int64_t Size) {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
Objects[ObjectIdx+NumFixedObjects].Size = Size;
|
||||
}
|
||||
|
||||
/// getObjectAlignment - Return the alignment of the specified stack object.
|
||||
unsigned getObjectAlignment(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].Alignment;
|
||||
}
|
||||
|
||||
/// setObjectAlignment - Change the alignment of the specified stack object.
|
||||
void setObjectAlignment(int ObjectIdx, unsigned Align) {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
|
||||
ensureMaxAlignment(Align);
|
||||
}
|
||||
|
||||
/// getObjectAllocation - Return the underlying Alloca of the specified
|
||||
/// stack object if it exists. Returns 0 if none exists.
|
||||
const AllocaInst* getObjectAllocation(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].Alloca;
|
||||
}
|
||||
|
||||
/// MayNeedStackProtector - Returns true if the object may need stack
|
||||
/// protectors.
|
||||
bool MayNeedStackProtector(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].MayNeedSP;
|
||||
}
|
||||
|
||||
/// getObjectOffset - Return the assigned stack offset of the specified object
|
||||
/// from the incoming stack pointer.
|
||||
///
|
||||
int64_t getObjectOffset(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
assert(!isDeadObjectIndex(ObjectIdx) &&
|
||||
"Getting frame offset for a dead object?");
|
||||
return Objects[ObjectIdx+NumFixedObjects].SPOffset;
|
||||
}
|
||||
|
||||
/// setObjectOffset - Set the stack frame offset of the specified object. The
|
||||
/// offset is relative to the stack pointer on entry to the function.
|
||||
///
|
||||
void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
assert(!isDeadObjectIndex(ObjectIdx) &&
|
||||
"Setting frame offset for a dead object?");
|
||||
Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
|
||||
}
|
||||
|
||||
/// getStackSize - Return the number of bytes that must be allocated to hold
|
||||
/// all of the fixed size frame objects. This is only valid after
|
||||
/// Prolog/Epilog code insertion has finalized the stack frame layout.
|
||||
///
|
||||
uint64_t getStackSize() const { return StackSize; }
|
||||
|
||||
/// setStackSize - Set the size of the stack...
|
||||
///
|
||||
void setStackSize(uint64_t Size) { StackSize = Size; }
|
||||
|
||||
/// Estimate and return the size of the stack frame.
|
||||
unsigned estimateStackSize(const MachineFunction &MF) const;
|
||||
|
||||
/// getOffsetAdjustment - Return the correction for frame offsets.
|
||||
///
|
||||
int getOffsetAdjustment() const { return OffsetAdjustment; }
|
||||
|
||||
/// setOffsetAdjustment - Set the correction for frame offsets.
|
||||
///
|
||||
void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
|
||||
|
||||
/// getMaxAlignment - Return the alignment in bytes that this function must be
|
||||
/// aligned to, which is greater than the default stack alignment provided by
|
||||
/// the target.
|
||||
///
|
||||
unsigned getMaxAlignment() const { return MaxAlignment; }
|
||||
|
||||
/// ensureMaxAlignment - Make sure the function is at least Align bytes
|
||||
/// aligned.
|
||||
void ensureMaxAlignment(unsigned Align);
|
||||
|
||||
/// AdjustsStack - Return true if this function adjusts the stack -- e.g.,
|
||||
/// when calling another function. This is only valid during and after
|
||||
/// prolog/epilog code insertion.
|
||||
bool adjustsStack() const { return AdjustsStack; }
|
||||
void setAdjustsStack(bool V) { AdjustsStack = V; }
|
||||
|
||||
/// hasCalls - Return true if the current function has any function calls.
|
||||
bool hasCalls() const { return HasCalls; }
|
||||
void setHasCalls(bool V) { HasCalls = V; }
|
||||
|
||||
/// getMaxCallFrameSize - Return the maximum size of a call frame that must be
|
||||
/// allocated for an outgoing function call. This is only available if
|
||||
/// CallFrameSetup/Destroy pseudo instructions are used by the target, and
|
||||
/// then only during or after prolog/epilog code insertion.
|
||||
///
|
||||
unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
|
||||
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
|
||||
|
||||
/// CreateFixedObject - Create a new object at a fixed location on the stack.
|
||||
/// All fixed objects should be created before other objects are created for
|
||||
/// efficiency. By default, fixed objects are immutable. This returns an
|
||||
/// index with a negative value.
|
||||
///
|
||||
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable);
|
||||
|
||||
|
||||
/// isFixedObjectIndex - Returns true if the specified index corresponds to a
|
||||
/// fixed stack object.
|
||||
bool isFixedObjectIndex(int ObjectIdx) const {
|
||||
return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
|
||||
}
|
||||
|
||||
/// isImmutableObjectIndex - Returns true if the specified index corresponds
|
||||
/// to an immutable object.
|
||||
bool isImmutableObjectIndex(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].isImmutable;
|
||||
}
|
||||
|
||||
/// isSpillSlotObjectIndex - Returns true if the specified index corresponds
|
||||
/// to a spill slot.
|
||||
bool isSpillSlotObjectIndex(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
|
||||
}
|
||||
|
||||
/// isDeadObjectIndex - Returns true if the specified index corresponds to
|
||||
/// a dead object.
|
||||
bool isDeadObjectIndex(int ObjectIdx) const {
|
||||
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
|
||||
"Invalid Object Idx!");
|
||||
return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
|
||||
}
|
||||
|
||||
/// CreateStackObject - Create a new statically sized stack object, returning
|
||||
/// a nonnegative identifier to represent it.
|
||||
///
|
||||
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
|
||||
bool MayNeedSP = false, const AllocaInst *Alloca = 0);
|
||||
|
||||
/// CreateSpillStackObject - Create a new statically sized stack object that
|
||||
/// represents a spill slot, returning a nonnegative identifier to represent
|
||||
/// it.
|
||||
///
|
||||
int CreateSpillStackObject(uint64_t Size, unsigned Alignment);
|
||||
|
||||
/// RemoveStackObject - Remove or mark dead a statically sized stack object.
|
||||
///
|
||||
void RemoveStackObject(int ObjectIdx) {
|
||||
// Mark it dead.
|
||||
Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
|
||||
}
|
||||
|
||||
/// CreateVariableSizedObject - Notify the MachineFrameInfo object that a
|
||||
/// variable sized object has been created. This must be created whenever a
|
||||
/// variable sized object is created, whether or not the index returned is
|
||||
/// actually used.
|
||||
///
|
||||
int CreateVariableSizedObject(unsigned Alignment);
|
||||
|
||||
/// getCalleeSavedInfo - Returns a reference to call saved info vector for the
|
||||
/// current function.
|
||||
const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
|
||||
return CSInfo;
|
||||
}
|
||||
|
||||
/// setCalleeSavedInfo - Used by prolog/epilog inserter to set the function's
|
||||
/// callee saved information.
|
||||
void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
|
||||
CSInfo = CSI;
|
||||
}
|
||||
|
||||
/// isCalleeSavedInfoValid - Has the callee saved info been calculated yet?
|
||||
bool isCalleeSavedInfoValid() const { return CSIValid; }
|
||||
|
||||
void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
|
||||
|
||||
/// getPristineRegs - Return a set of physical registers that are pristine on
|
||||
/// entry to the MBB.
|
||||
///
|
||||
/// Pristine registers hold a value that is useless to the current function,
|
||||
/// but that must be preserved - they are callee saved registers that have not
|
||||
/// been saved yet.
|
||||
///
|
||||
/// Before the PrologueEpilogueInserter has placed the CSR spill code, this
|
||||
/// method always returns an empty set.
|
||||
BitVector getPristineRegs(const MachineBasicBlock *MBB) const;
|
||||
|
||||
/// print - Used by the MachineFunction printer to print information about
|
||||
/// stack objects. Implemented in MachineFunction.cpp
|
||||
///
|
||||
void print(const MachineFunction &MF, raw_ostream &OS) const;
|
||||
|
||||
/// dump - Print the function to stderr.
|
||||
void dump(const MachineFunction &MF) const;
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
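// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): how client code
// might use the MachineFrameInfo interface declared above. It assumes a
// MachineFunction &MF is available (MachineFunction::getFrameInfo is declared
// in MachineFunction.h, added later in this commit) and uses only accessors
// declared in this file.
static void demoFrameInfoUsage(llvm::MachineFunction &MF) {
  llvm::MachineFrameInfo *MFI = MF.getFrameInfo();

  // A statically sized, reorderable 16-byte object: gets a nonnegative index.
  int ObjFI = MFI->CreateStackObject(/*Size=*/16, /*Alignment=*/8,
                                     /*isSS=*/false);

  // A spill slot; spill slots cannot alias any other memory objects.
  int SpillFI = MFI->CreateSpillStackObject(/*Size=*/8, /*Alignment=*/8);

  // An incoming argument at SP+0 on entry: fixed objects get negative indices.
  int ArgFI = MFI->CreateFixedObject(/*Size=*/4, /*SPOffset=*/0,
                                     /*Immutable=*/true);

  assert(!MFI->isFixedObjectIndex(ObjFI) && "reorderable objects are >= 0");
  assert(MFI->isSpillSlotObjectIndex(SpillFI) && "expected a spill slot");
  assert(MFI->isFixedObjectIndex(ArgFI) && "fixed objects are < 0");
  (void)MFI->getObjectSize(ObjFI);
  (void)MFI->getObjectAlignment(SpillFI);
}
// ---------------------------------------------------------------------------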
|
||||
524
thirdparty/clang/include/llvm/CodeGen/MachineFunction.h
vendored
Normal file
@@ -0,0 +1,524 @@
|
||||
//===-- llvm/CodeGen/MachineFunction.h --------------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Collect native machine code for a function. This class contains a list of
|
||||
// MachineBasicBlock instances that make up the current compiled function.
|
||||
//
|
||||
// This class also contains pointers to various classes which hold
|
||||
// target-specific information about the generated code.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
|
||||
#define LLVM_CODEGEN_MACHINEFUNCTION_H
|
||||
|
||||
#include "llvm/ADT/ilist.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Support/ArrayRecycler.h"
|
||||
#include "llvm/Support/DebugLoc.h"
|
||||
#include "llvm/Support/Recycler.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class Value;
|
||||
class Function;
|
||||
class GCModuleInfo;
|
||||
class MachineRegisterInfo;
|
||||
class MachineFrameInfo;
|
||||
class MachineConstantPool;
|
||||
class MachineJumpTableInfo;
|
||||
class MachineModuleInfo;
|
||||
class MCContext;
|
||||
class Pass;
|
||||
class TargetMachine;
|
||||
class TargetRegisterClass;
|
||||
struct MachinePointerInfo;
|
||||
|
||||
template <>
|
||||
struct ilist_traits<MachineBasicBlock>
|
||||
: public ilist_default_traits<MachineBasicBlock> {
|
||||
mutable ilist_half_node<MachineBasicBlock> Sentinel;
|
||||
public:
|
||||
MachineBasicBlock *createSentinel() const {
|
||||
return static_cast<MachineBasicBlock*>(&Sentinel);
|
||||
}
|
||||
void destroySentinel(MachineBasicBlock *) const {}
|
||||
|
||||
MachineBasicBlock *provideInitialHead() const { return createSentinel(); }
|
||||
MachineBasicBlock *ensureHead(MachineBasicBlock*) const {
|
||||
return createSentinel();
|
||||
}
|
||||
static void noteHead(MachineBasicBlock*, MachineBasicBlock*) {}
|
||||
|
||||
void addNodeToList(MachineBasicBlock* MBB);
|
||||
void removeNodeFromList(MachineBasicBlock* MBB);
|
||||
void deleteNode(MachineBasicBlock *MBB);
|
||||
private:
|
||||
void createNode(const MachineBasicBlock &);
|
||||
};
|
||||
|
||||
/// MachineFunctionInfo - This class can be derived from and used by targets to
|
||||
/// hold private target-specific information for each MachineFunction. Objects
|
||||
/// of this type are accessed/created with MF::getInfo and destroyed when the
|
||||
/// MachineFunction is destroyed.
|
||||
struct MachineFunctionInfo {
|
||||
virtual ~MachineFunctionInfo();
|
||||
};
|
||||
|
||||
class MachineFunction {
|
||||
const Function *Fn;
|
||||
const TargetMachine &Target;
|
||||
MCContext &Ctx;
|
||||
MachineModuleInfo &MMI;
|
||||
GCModuleInfo *GMI;
|
||||
|
||||
// RegInfo - Information about each register in use in the function.
|
||||
MachineRegisterInfo *RegInfo;
|
||||
|
||||
// Used to keep track of target-specific per-machine function information for
|
||||
// the target implementation.
|
||||
MachineFunctionInfo *MFInfo;
|
||||
|
||||
// Keep track of objects allocated on the stack.
|
||||
MachineFrameInfo *FrameInfo;
|
||||
|
||||
// Keep track of constants which are spilled to memory
|
||||
MachineConstantPool *ConstantPool;
|
||||
|
||||
// Keep track of jump tables for switch instructions
|
||||
MachineJumpTableInfo *JumpTableInfo;
|
||||
|
||||
// Function-level unique numbering for MachineBasicBlocks. When a
|
||||
// MachineBasicBlock is inserted into a MachineFunction it is automatically
|
||||
// numbered and this vector keeps track of the mapping from ID's to MBB's.
|
||||
std::vector<MachineBasicBlock*> MBBNumbering;
|
||||
|
||||
// Pool-allocate MachineFunction-lifetime and IR objects.
|
||||
BumpPtrAllocator Allocator;
|
||||
|
||||
// Allocation management for instructions in function.
|
||||
Recycler<MachineInstr> InstructionRecycler;
|
||||
|
||||
// Allocation management for operand arrays on instructions.
|
||||
ArrayRecycler<MachineOperand> OperandRecycler;
|
||||
|
||||
// Allocation management for basic blocks in function.
|
||||
Recycler<MachineBasicBlock> BasicBlockRecycler;
|
||||
|
||||
// List of machine basic blocks in function
|
||||
typedef ilist<MachineBasicBlock> BasicBlockListType;
|
||||
BasicBlockListType BasicBlocks;
|
||||
|
||||
/// FunctionNumber - This provides a unique ID for each function emitted in
|
||||
/// this translation unit.
|
||||
///
|
||||
unsigned FunctionNumber;
|
||||
|
||||
/// Alignment - The alignment of the function.
|
||||
unsigned Alignment;
|
||||
|
||||
/// ExposesReturnsTwice - True if the function calls setjmp or related
|
||||
/// functions with attribute "returns twice", but doesn't have
|
||||
/// the attribute itself.
|
||||
/// This is used to limit optimizations which cannot reason
|
||||
/// about the control flow of such functions.
|
||||
bool ExposesReturnsTwice;
|
||||
|
||||
/// True if the function includes MS-style inline assembly.
|
||||
bool HasMSInlineAsm;
|
||||
|
||||
MachineFunction(const MachineFunction &) LLVM_DELETED_FUNCTION;
|
||||
void operator=(const MachineFunction&) LLVM_DELETED_FUNCTION;
|
||||
public:
|
||||
MachineFunction(const Function *Fn, const TargetMachine &TM,
|
||||
unsigned FunctionNum, MachineModuleInfo &MMI,
|
||||
GCModuleInfo* GMI);
|
||||
~MachineFunction();
|
||||
|
||||
MachineModuleInfo &getMMI() const { return MMI; }
|
||||
GCModuleInfo *getGMI() const { return GMI; }
|
||||
MCContext &getContext() const { return Ctx; }
|
||||
|
||||
/// getFunction - Return the LLVM function that this machine code represents
|
||||
///
|
||||
const Function *getFunction() const { return Fn; }
|
||||
|
||||
/// getName - Return the name of the corresponding LLVM function.
|
||||
///
|
||||
StringRef getName() const;
|
||||
|
||||
/// getFunctionNumber - Return a unique ID for the current function.
|
||||
///
|
||||
unsigned getFunctionNumber() const { return FunctionNumber; }
|
||||
|
||||
/// getTarget - Return the target machine this machine code is compiled with
|
||||
///
|
||||
const TargetMachine &getTarget() const { return Target; }
|
||||
|
||||
/// getRegInfo - Return information about the registers currently in use.
|
||||
///
|
||||
MachineRegisterInfo &getRegInfo() { return *RegInfo; }
|
||||
const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
|
||||
|
||||
/// getFrameInfo - Return the frame info object for the current function.
|
||||
/// This object contains information about objects allocated on the stack
|
||||
/// frame of the current function in an abstract way.
|
||||
///
|
||||
MachineFrameInfo *getFrameInfo() { return FrameInfo; }
|
||||
const MachineFrameInfo *getFrameInfo() const { return FrameInfo; }
|
||||
|
||||
/// getJumpTableInfo - Return the jump table info object for the current
|
||||
/// function. This object contains information about jump tables in the
|
||||
/// current function. If the current function has no jump tables, this will
|
||||
/// return null.
|
||||
const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
|
||||
MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
|
||||
|
||||
/// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it
|
||||
/// does not already exist, allocate one.
|
||||
MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);
|
||||
|
||||
|
||||
/// getConstantPool - Return the constant pool object for the current
|
||||
/// function.
|
||||
///
|
||||
MachineConstantPool *getConstantPool() { return ConstantPool; }
|
||||
const MachineConstantPool *getConstantPool() const { return ConstantPool; }
|
||||
|
||||
/// getAlignment - Return the alignment (log2, not bytes) of the function.
|
||||
///
|
||||
unsigned getAlignment() const { return Alignment; }
|
||||
|
||||
/// setAlignment - Set the alignment (log2, not bytes) of the function.
|
||||
///
|
||||
void setAlignment(unsigned A) { Alignment = A; }
|
||||
|
||||
/// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
|
||||
void ensureAlignment(unsigned A) {
|
||||
if (Alignment < A) Alignment = A;
|
||||
}
|
||||
|
||||
/// exposesReturnsTwice - Returns true if the function calls setjmp or
|
||||
/// any other similar functions with attribute "returns twice" without
|
||||
/// having the attribute itself.
|
||||
bool exposesReturnsTwice() const {
|
||||
return ExposesReturnsTwice;
|
||||
}
|
||||
|
||||
/// setExposesReturnsTwice - Set a flag that indicates if there's a call to
|
||||
/// a "returns twice" function.
|
||||
void setExposesReturnsTwice(bool B) {
|
||||
ExposesReturnsTwice = B;
|
||||
}
|
||||
|
||||
/// Returns true if the function contains any MS-style inline assembly.
|
||||
bool hasMSInlineAsm() const {
|
||||
return HasMSInlineAsm;
|
||||
}
|
||||
|
||||
/// Set a flag that indicates that the function contains MS-style inline
|
||||
/// assembly.
|
||||
void setHasMSInlineAsm(bool B) {
|
||||
HasMSInlineAsm = B;
|
||||
}
|
||||
|
||||
/// getInfo - Keep track of various per-function pieces of information for
|
||||
/// backends that would like to do so.
|
||||
///
|
||||
template<typename Ty>
|
||||
Ty *getInfo() {
|
||||
if (!MFInfo) {
|
||||
// This should be just `new (Allocator.Allocate<Ty>()) Ty(*this)', but
|
||||
// that apparently breaks GCC 3.3.
|
||||
Ty *Loc = static_cast<Ty*>(Allocator.Allocate(sizeof(Ty),
|
||||
AlignOf<Ty>::Alignment));
|
||||
MFInfo = new (Loc) Ty(*this);
|
||||
}
|
||||
return static_cast<Ty*>(MFInfo);
|
||||
}
|
||||
|
||||
template<typename Ty>
|
||||
const Ty *getInfo() const {
|
||||
return const_cast<MachineFunction*>(this)->getInfo<Ty>();
|
||||
}
|
||||
|
||||
/// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
|
||||
/// are inserted into the machine function. The block number for a machine
|
||||
/// basic block can be found by using the MBB::getBlockNumber method; this
|
||||
/// method provides the inverse mapping.
|
||||
///
|
||||
MachineBasicBlock *getBlockNumbered(unsigned N) const {
|
||||
assert(N < MBBNumbering.size() && "Illegal block number");
|
||||
assert(MBBNumbering[N] && "Block was removed from the machine function!");
|
||||
return MBBNumbering[N];
|
||||
}
|
||||
|
||||
/// getNumBlockIDs - Return the number of MBB ID's allocated.
|
||||
///
|
||||
unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
|
||||
|
||||
/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
|
||||
/// recomputes them. This guarantees that the MBB numbers are sequential,
|
||||
/// dense, and match the ordering of the blocks within the function. If a
|
||||
/// specific MachineBasicBlock is specified, only that block and those after
|
||||
/// it are renumbered.
|
||||
void RenumberBlocks(MachineBasicBlock *MBBFrom = 0);
|
||||
|
||||
/// print - Print out the MachineFunction in a format suitable for debugging
|
||||
/// to the specified stream.
|
||||
///
|
||||
void print(raw_ostream &OS, SlotIndexes* = 0) const;
|
||||
|
||||
/// viewCFG - This function is meant for use from the debugger. You can just
|
||||
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
|
||||
/// program, displaying the CFG of the current function with the code for each
|
||||
/// basic block inside. This depends on there being a 'dot' and 'gv' program
|
||||
/// in your path.
|
||||
///
|
||||
void viewCFG() const;
|
||||
|
||||
/// viewCFGOnly - This function is meant for use from the debugger. It works
|
||||
/// just like viewCFG, but it does not include the contents of basic blocks
|
||||
/// into the nodes, just the label. If you are only interested in the CFG
|
||||
/// this can make the graph smaller.
|
||||
///
|
||||
void viewCFGOnly() const;
|
||||
|
||||
/// dump - Print the current MachineFunction to cerr, useful for debugger use.
|
||||
///
|
||||
void dump() const;
|
||||
|
||||
/// verify - Run the current MachineFunction through the machine code
|
||||
/// verifier, useful for debugger use.
|
||||
void verify(Pass *p = NULL, const char *Banner = NULL) const;
|
||||
|
||||
// Provide accessors for the MachineBasicBlock list...
|
||||
typedef BasicBlockListType::iterator iterator;
|
||||
typedef BasicBlockListType::const_iterator const_iterator;
|
||||
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
|
||||
typedef std::reverse_iterator<iterator> reverse_iterator;
|
||||
|
||||
/// addLiveIn - Add the specified physical register as a live-in value and
|
||||
/// create a corresponding virtual register for it.
|
||||
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// BasicBlock accessor functions.
|
||||
//
|
||||
iterator begin() { return BasicBlocks.begin(); }
|
||||
const_iterator begin() const { return BasicBlocks.begin(); }
|
||||
iterator end () { return BasicBlocks.end(); }
|
||||
const_iterator end () const { return BasicBlocks.end(); }
|
||||
|
||||
reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
|
||||
const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
|
||||
reverse_iterator rend () { return BasicBlocks.rend(); }
|
||||
const_reverse_iterator rend () const { return BasicBlocks.rend(); }
|
||||
|
||||
unsigned size() const { return (unsigned)BasicBlocks.size();}
|
||||
bool empty() const { return BasicBlocks.empty(); }
|
||||
const MachineBasicBlock &front() const { return BasicBlocks.front(); }
|
||||
MachineBasicBlock &front() { return BasicBlocks.front(); }
|
||||
const MachineBasicBlock & back() const { return BasicBlocks.back(); }
|
||||
MachineBasicBlock & back() { return BasicBlocks.back(); }
|
||||
|
||||
void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
|
||||
void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
|
||||
void insert(iterator MBBI, MachineBasicBlock *MBB) {
|
||||
BasicBlocks.insert(MBBI, MBB);
|
||||
}
|
||||
void splice(iterator InsertPt, iterator MBBI) {
|
||||
BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
|
||||
}
|
||||
void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
|
||||
BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
|
||||
}
|
||||
|
||||
void remove(iterator MBBI) {
|
||||
BasicBlocks.remove(MBBI);
|
||||
}
|
||||
void erase(iterator MBBI) {
|
||||
BasicBlocks.erase(MBBI);
|
||||
}
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Internal functions used to automatically number MachineBasicBlocks
|
||||
//
|
||||
|
||||
/// addToMBBNumbering - Add the specified MachineBasicBlock to the block
/// numbering of this MachineFunction and return the number assigned to it.
|
||||
///
|
||||
unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
|
||||
MBBNumbering.push_back(MBB);
|
||||
return (unsigned)MBBNumbering.size()-1;
|
||||
}
|
||||
|
||||
/// removeFromMBBNumbering - Remove the specific machine basic block from our
|
||||
/// tracker; this is only really to be used by the MachineBasicBlock
|
||||
/// implementation.
|
||||
void removeFromMBBNumbering(unsigned N) {
|
||||
assert(N < MBBNumbering.size() && "Illegal basic block #");
|
||||
MBBNumbering[N] = 0;
|
||||
}
|
||||
|
||||
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
|
||||
/// of `new MachineInstr'.
|
||||
///
|
||||
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID,
|
||||
DebugLoc DL,
|
||||
bool NoImp = false);
|
||||
|
||||
/// CloneMachineInstr - Create a new MachineInstr which is a copy of the
|
||||
/// 'Orig' instruction, identical in all ways except the instruction
|
||||
/// has no parent, prev, or next.
|
||||
///
|
||||
/// See also TargetInstrInfo::duplicate() for target-specific fixes to cloned
|
||||
/// instructions.
|
||||
MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
|
||||
|
||||
/// DeleteMachineInstr - Delete the given MachineInstr.
|
||||
///
|
||||
void DeleteMachineInstr(MachineInstr *MI);
|
||||
|
||||
/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
|
||||
/// instead of `new MachineBasicBlock'.
|
||||
///
|
||||
MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = 0);
|
||||
|
||||
/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
|
||||
///
|
||||
void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
|
||||
|
||||
/// getMachineMemOperand - Allocate a new MachineMemOperand.
|
||||
/// MachineMemOperands are owned by the MachineFunction and need not be
|
||||
/// explicitly deallocated.
|
||||
MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
|
||||
unsigned f, uint64_t s,
|
||||
unsigned base_alignment,
|
||||
const MDNode *TBAAInfo = 0,
|
||||
const MDNode *Ranges = 0);
|
||||
|
||||
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
|
||||
/// an existing one, adjusting by an offset and using the given size.
|
||||
/// MachineMemOperands are owned by the MachineFunction and need not be
|
||||
/// explicitly deallocated.
|
||||
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
|
||||
int64_t Offset, uint64_t Size);
|
||||
|
||||
typedef ArrayRecycler<MachineOperand>::Capacity OperandCapacity;
|
||||
|
||||
/// Allocate an array of MachineOperands. This is only intended for use by
|
||||
/// internal MachineInstr functions.
|
||||
MachineOperand *allocateOperandArray(OperandCapacity Cap) {
|
||||
return OperandRecycler.allocate(Cap, Allocator);
|
||||
}
|
||||
|
||||
/// Deallocate an array of MachineOperands and recycle the memory. This is
|
||||
/// only intended for use by internal MachineInstr functions.
|
||||
/// Cap must be the same capacity that was used to allocate the array.
|
||||
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
|
||||
OperandRecycler.deallocate(Cap, Array);
|
||||
}
|
||||
|
||||
/// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
|
||||
/// pointers. This array is owned by the MachineFunction.
|
||||
MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
|
||||
|
||||
/// extractLoadMemRefs - Allocate an array and populate it with just the
|
||||
/// load information from the given MachineMemOperand sequence.
|
||||
std::pair<MachineInstr::mmo_iterator,
|
||||
MachineInstr::mmo_iterator>
|
||||
extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
|
||||
MachineInstr::mmo_iterator End);
|
||||
|
||||
/// extractStoreMemRefs - Allocate an array and populate it with just the
|
||||
/// store information from the given MachineMemOperand sequence.
|
||||
std::pair<MachineInstr::mmo_iterator,
|
||||
MachineInstr::mmo_iterator>
|
||||
extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
|
||||
MachineInstr::mmo_iterator End);
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Label Manipulation.
|
||||
//
|
||||
|
||||
/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
|
||||
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
|
||||
/// normal 'L' label is returned.
|
||||
MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
|
||||
bool isLinkerPrivate = false) const;
|
||||
|
||||
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
|
||||
/// base.
|
||||
MCSymbol *getPICBaseSymbol() const;
|
||||
};
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// GraphTraits specializations for function basic block graphs (CFGs)
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
// Provide specializations of GraphTraits to be able to treat a
|
||||
// machine function as a graph of machine basic blocks... these are
|
||||
// the same as the machine basic block iterators, except that the root
|
||||
// node is implicitly the first node of the function.
|
||||
//
|
||||
template <> struct GraphTraits<MachineFunction*> :
|
||||
public GraphTraits<MachineBasicBlock*> {
|
||||
static NodeType *getEntryNode(MachineFunction *F) {
|
||||
return &F->front();
|
||||
}
|
||||
|
||||
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
|
||||
typedef MachineFunction::iterator nodes_iterator;
|
||||
static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); }
|
||||
static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); }
|
||||
static unsigned size (MachineFunction *F) { return F->size(); }
|
||||
};
|
||||
template <> struct GraphTraits<const MachineFunction*> :
|
||||
public GraphTraits<const MachineBasicBlock*> {
|
||||
static NodeType *getEntryNode(const MachineFunction *F) {
|
||||
return &F->front();
|
||||
}
|
||||
|
||||
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
|
||||
typedef MachineFunction::const_iterator nodes_iterator;
|
||||
static nodes_iterator nodes_begin(const MachineFunction *F) {
|
||||
return F->begin();
|
||||
}
|
||||
static nodes_iterator nodes_end (const MachineFunction *F) {
|
||||
return F->end();
|
||||
}
|
||||
static unsigned size (const MachineFunction *F) {
|
||||
return F->size();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Provide specializations of GraphTraits to be able to treat a function as a
|
||||
// graph of basic blocks... and to walk it in inverse order. Inverse order for
|
||||
// a function is considered to be when traversing the predecessor edges of a BB
|
||||
// instead of the successor edges.
|
||||
//
|
||||
template <> struct GraphTraits<Inverse<MachineFunction*> > :
|
||||
public GraphTraits<Inverse<MachineBasicBlock*> > {
|
||||
static NodeType *getEntryNode(Inverse<MachineFunction*> G) {
|
||||
return &G.Graph->front();
|
||||
}
|
||||
};
|
||||
template <> struct GraphTraits<Inverse<const MachineFunction*> > :
|
||||
public GraphTraits<Inverse<const MachineBasicBlock*> > {
|
||||
static NodeType *getEntryNode(Inverse<const MachineFunction *> G) {
|
||||
return &G.Graph->front();
|
||||
}
|
||||
};
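// Illustrative sketch (not part of the original header): because of the
// GraphTraits specializations above, the generic graph iterators can walk the
// machine CFG directly. This assumes llvm/ADT/DepthFirstIterator.h has been
// included by the client.
inline unsigned countReachableMBBs(MachineFunction *MF) {
  unsigned N = 0;
  // Visit every basic block reachable from the entry block, in DFS order.
  for (df_iterator<MachineFunction*> I = df_begin(MF), E = df_end(MF);
       I != E; ++I)
    ++N;
  return N;
}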
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
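// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): targets keep private
// per-function state by deriving from MachineFunctionInfo and fetching it
// through MachineFunction::getInfo<Ty>(), which lazily constructs the object
// in the function's allocator. The class and field below are hypothetical.
struct DemoTargetFunctionInfo : public llvm::MachineFunctionInfo {
  int VarArgsFrameIndex;  // example of target-private, per-function state
  explicit DemoTargetFunctionInfo(llvm::MachineFunction &MF)
      : VarArgsFrameIndex(0) { (void)MF; }
};

static void demoGetInfo(llvm::MachineFunction &MF) {
  // The first call constructs the object; later calls return the same one.
  DemoTargetFunctionInfo *FI = MF.getInfo<DemoTargetFunctionInfo>();
  FI->VarArgsFrameIndex = -1;
}
// ---------------------------------------------------------------------------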
|
||||
51
thirdparty/clang/include/llvm/CodeGen/MachineFunctionAnalysis.h
vendored
Normal file
@@ -0,0 +1,51 @@
//===-- MachineFunctionAnalysis.h - Owner of MachineFunctions ----*-C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the MachineFunctionAnalysis class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
#define LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H

#include "llvm/Pass.h"

namespace llvm {

class MachineFunction;
class TargetMachine;

/// MachineFunctionAnalysis - This class is a Pass that manages a
/// MachineFunction object.
struct MachineFunctionAnalysis : public FunctionPass {
private:
  const TargetMachine &TM;
  MachineFunction *MF;
  unsigned NextFnNum;
public:
  static char ID;
  explicit MachineFunctionAnalysis(const TargetMachine &tm);
  ~MachineFunctionAnalysis();

  MachineFunction &getMF() const { return *MF; }

  virtual const char* getPassName() const {
    return "Machine Function Analysis";
  }

private:
  virtual bool doInitialization(Module &M);
  virtual bool runOnFunction(Function &F);
  virtual void releaseMemory();
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
};

} // End llvm namespace

#endif
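// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): a FunctionPass that
// needs the machine code for the current IR function can require this
// analysis and fetch the MachineFunction it owns. The pass name is
// hypothetical, and it assumes MachineFunction.h and the IR Function
// declarations are available; in practice most users derive from
// MachineFunctionPass (declared in MachineFunctionPass.h) instead, which does
// exactly this internally.
struct DemoMFPrinter : public llvm::FunctionPass {
  static char ID;
  DemoMFPrinter() : llvm::FunctionPass(ID) {}

  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    AU.addRequired<llvm::MachineFunctionAnalysis>();
    AU.setPreservesAll();
  }

  virtual bool runOnFunction(llvm::Function &F) {
    llvm::MachineFunction &MF =
        getAnalysis<llvm::MachineFunctionAnalysis>().getMF();
    MF.dump();      // print the machine code corresponding to F to stderr
    return false;   // analysis only: the IR is not modified
  }
};
char DemoMFPrinter::ID = 0;
// ---------------------------------------------------------------------------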
|
||||
59
thirdparty/clang/include/llvm/CodeGen/MachineFunctionPass.h
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
//===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the MachineFunctionPass class. MachineFunctionPass's are
|
||||
// just FunctionPass's, except they operate on machine code as part of a code
|
||||
// generator. Because they operate on machine code, not the LLVM
|
||||
// representation, MachineFunctionPass's are not allowed to modify the LLVM
|
||||
// representation. Due to this limitation, the MachineFunctionPass class takes
|
||||
// care of declaring that no LLVM passes are invalidated.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
|
||||
#define LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
|
||||
|
||||
#include "llvm/Pass.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MachineFunction;
|
||||
|
||||
/// MachineFunctionPass - This class adapts the FunctionPass interface to
|
||||
/// allow convenient creation of passes that operate on the MachineFunction
|
||||
/// representation. Instead of overriding runOnFunction, subclasses
|
||||
/// override runOnMachineFunction.
|
||||
class MachineFunctionPass : public FunctionPass {
|
||||
protected:
|
||||
explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}
|
||||
|
||||
/// runOnMachineFunction - This method must be overridden to perform the
|
||||
/// desired machine code transformation or analysis.
|
||||
///
|
||||
virtual bool runOnMachineFunction(MachineFunction &MF) = 0;
|
||||
|
||||
/// getAnalysisUsage - Subclasses that override getAnalysisUsage
|
||||
/// must call this.
|
||||
///
|
||||
/// For MachineFunctionPasses, calling AU.preservesCFG() indicates that
|
||||
/// the pass does not modify the MachineBasicBlock CFG.
|
||||
///
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
|
||||
private:
|
||||
/// createPrinterPass - Get a machine function printer pass.
|
||||
virtual Pass *createPrinterPass(raw_ostream &O,
|
||||
const std::string &Banner) const;
|
||||
|
||||
virtual bool runOnFunction(Function &F);
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
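// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the usual way to add
// a codegen pass is to derive from MachineFunctionPass and override
// runOnMachineFunction. The pass below is hypothetical and assumes
// llvm/CodeGen/MachineFunction.h is included for MachineFunction::size().
struct DemoCountMBBs : public llvm::MachineFunctionPass {
  static char ID;
  DemoCountMBBs() : llvm::MachineFunctionPass(ID) {}

  virtual bool runOnMachineFunction(llvm::MachineFunction &MF) {
    unsigned NumBlocks = MF.size();  // number of machine basic blocks
    (void)NumBlocks;
    return false;                    // nothing was modified
  }

  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    AU.setPreservesAll();
    // Subclasses that override getAnalysisUsage must call the base version.
    llvm::MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char DemoCountMBBs::ID = 0;
// ---------------------------------------------------------------------------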
|
||||
1076
thirdparty/clang/include/llvm/CodeGen/MachineInstr.h
vendored
Normal file
File diff suppressed because it is too large
447
thirdparty/clang/include/llvm/CodeGen/MachineInstrBuilder.h
vendored
Normal file
@@ -0,0 +1,447 @@
|
||||
//===-- CodeGen/MachineInstBuilder.h - Simplify creation of MIs -*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file exposes a function named BuildMI, which is useful for dramatically
|
||||
// simplifying how MachineInstr's are created. It allows use of code like this:
|
||||
//
|
||||
// M = BuildMI(X86::ADDrr8, 2).addReg(argVal1).addReg(argVal2);
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
|
||||
#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
|
||||
|
||||
#include "llvm/CodeGen/MachineFunction.h"
|
||||
#include "llvm/CodeGen/MachineInstrBundle.h"
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MCInstrDesc;
|
||||
class MDNode;
|
||||
|
||||
namespace RegState {
|
||||
enum {
|
||||
Define = 0x2,
|
||||
Implicit = 0x4,
|
||||
Kill = 0x8,
|
||||
Dead = 0x10,
|
||||
Undef = 0x20,
|
||||
EarlyClobber = 0x40,
|
||||
Debug = 0x80,
|
||||
InternalRead = 0x100,
|
||||
DefineNoRead = Define | Undef,
|
||||
ImplicitDefine = Implicit | Define,
|
||||
ImplicitKill = Implicit | Kill
|
||||
};
|
||||
}
|
||||
|
||||
class MachineInstrBuilder {
|
||||
MachineFunction *MF;
|
||||
MachineInstr *MI;
|
||||
public:
|
||||
MachineInstrBuilder() : MF(0), MI(0) {}
|
||||
|
||||
/// Create a MachineInstrBuilder for manipulating an existing instruction.
|
||||
/// F must be the machine function that was used to allocate I.
|
||||
MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
|
||||
|
||||
/// Allow automatic conversion to the machine instruction we are working on.
|
||||
///
|
||||
operator MachineInstr*() const { return MI; }
|
||||
MachineInstr *operator->() const { return MI; }
|
||||
operator MachineBasicBlock::iterator() const { return MI; }
|
||||
|
||||
/// addReg - Add a new virtual register operand...
|
||||
///
|
||||
const
|
||||
MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
|
||||
unsigned SubReg = 0) const {
|
||||
assert((flags & 0x1) == 0 &&
|
||||
"Passing in 'true' to addReg is forbidden! Use enums instead.");
|
||||
MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
|
||||
flags & RegState::Define,
|
||||
flags & RegState::Implicit,
|
||||
flags & RegState::Kill,
|
||||
flags & RegState::Dead,
|
||||
flags & RegState::Undef,
|
||||
flags & RegState::EarlyClobber,
|
||||
SubReg,
|
||||
flags & RegState::Debug,
|
||||
flags & RegState::InternalRead));
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// addImm - Add a new immediate operand.
|
||||
///
|
||||
const MachineInstrBuilder &addImm(int64_t Val) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateImm(Val));
|
||||
return *this;
|
||||
}
|
||||
|
||||
const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
|
||||
return *this;
|
||||
}
|
||||
|
||||
const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
|
||||
return *this;
|
||||
}
|
||||
|
||||
const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
|
||||
unsigned char TargetFlags = 0) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
|
||||
return *this;
|
||||
}
|
||||
|
||||
const MachineInstrBuilder &addFrameIndex(int Idx) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
|
||||
return *this;
|
||||
}
|
||||
|
||||
const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
|
||||
int Offset = 0,
|
||||
unsigned char TargetFlags = 0) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
|
||||
return *this;
|
||||
}
|
||||
|
||||
const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
|
||||
unsigned char TargetFlags = 0) const {
|
||||
MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
|
||||
TargetFlags));
|
||||
return *this;
|
||||
}
|
||||
|
||||
  const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
                                               unsigned char TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
                                              int64_t Offset = 0,
                                              unsigned char TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addExternalSymbol(const char *FnName,
                                               unsigned char TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
                                             int64_t Offset = 0,
                                             unsigned char TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
    MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
    return *this;
  }

  const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
    MI->addMemOperand(*MF, MMO);
    return *this;
  }

  const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
                                        MachineInstr::mmo_iterator e) const {
    MI->setMemRefs(b, e);
    return *this;
  }

  const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
    MI->addOperand(*MF, MO);
    return *this;
  }

  const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
    MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
    return *this;
  }

  const MachineInstrBuilder &addSym(MCSymbol *Sym) const {
    MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym));
    return *this;
  }

  const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
    MI->setFlags(Flags);
    return *this;
  }

  const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
    MI->setFlag(Flag);
    return *this;
  }

  // Add a displacement from an existing MachineOperand with an added offset.
  const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
                                     unsigned char TargetFlags = 0) const {
    switch (Disp.getType()) {
      default:
        llvm_unreachable("Unhandled operand type in addDisp()");
      case MachineOperand::MO_Immediate:
        return addImm(Disp.getImm() + off);
      case MachineOperand::MO_GlobalAddress: {
        // If the caller specifies new TargetFlags then use them, otherwise the
        // default behavior is to copy the target flags from the existing
        // MachineOperand. This means if the caller wants to clear the
        // target flags it needs to do so explicitly.
        if (TargetFlags)
          return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
                                  TargetFlags);
        return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
                                Disp.getTargetFlags());
      }
    }
  }

  /// Copy all the implicit operands from OtherMI onto this one.
  const MachineInstrBuilder &copyImplicitOps(const MachineInstr *OtherMI) {
    MI->copyImplicitOps(*MF, OtherMI);
    return *this;
  }
};

/// BuildMI - Builder interface. Specify how to create the initial instruction
/// itself.
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID) {
  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
}

/// BuildMI - This version of the builder sets up the first operand as a
/// destination virtual register.
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID,
                                   unsigned DestReg) {
  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
           .addReg(DestReg, RegState::Define);
}

/// BuildMI - This version of the builder inserts the newly-built
/// instruction before the given position in the given MachineBasicBlock, and
/// sets up the first operand as a destination virtual register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::iterator I,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID,
                                   unsigned DestReg) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::instr_iterator I,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID,
                                   unsigned DestReg) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineInstr *I,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID,
                                   unsigned DestReg) {
  if (I->isInsideBundle()) {
    MachineBasicBlock::instr_iterator MII = I;
    return BuildMI(BB, MII, DL, MCID, DestReg);
  }

  MachineBasicBlock::iterator MII = I;
  return BuildMI(BB, MII, DL, MCID, DestReg);
}

/// BuildMI - This version of the builder inserts the newly-built
/// instruction before the given position in the given MachineBasicBlock, and
/// does NOT take a destination register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::iterator I,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::instr_iterator I,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineInstr *I,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID) {
  if (I->isInsideBundle()) {
    MachineBasicBlock::instr_iterator MII = I;
    return BuildMI(BB, MII, DL, MCID);
  }

  MachineBasicBlock::iterator MII = I;
  return BuildMI(BB, MII, DL, MCID);
}

/// BuildMI - This version of the builder inserts the newly-built
/// instruction at the end of the given MachineBasicBlock, and does NOT take a
/// destination register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID) {
  return BuildMI(*BB, BB->end(), DL, MCID);
}

/// BuildMI - This version of the builder inserts the newly-built
/// instruction at the end of the given MachineBasicBlock, and sets up the
/// first operand as a destination virtual register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
                                   DebugLoc DL,
                                   const MCInstrDesc &MCID,
                                   unsigned DestReg) {
  return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
}
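
// Illustrative usage sketch (not part of the original header). TII, MBB, I,
// DL, DestReg, SrcReg and the X86 opcode are assumed to exist in the caller;
// the point is only to show how the BuildMI overloads and the chained add*
// calls fit together.
//
//   BuildMI(MBB, I, DL, TII.get(X86::ADD32ri), DestReg)
//       .addReg(SrcReg)
//       .addImm(42);
//
// Each addReg/addImm call appends one MachineOperand in the order the
// MCInstrDesc expects and returns the builder, so the whole instruction can
// be assembled in a single expression.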
inline unsigned getDefRegState(bool B) {
  return B ? RegState::Define : 0;
}
inline unsigned getImplRegState(bool B) {
  return B ? RegState::Implicit : 0;
}
inline unsigned getKillRegState(bool B) {
  return B ? RegState::Kill : 0;
}
inline unsigned getDeadRegState(bool B) {
  return B ? RegState::Dead : 0;
}
inline unsigned getUndefRegState(bool B) {
  return B ? RegState::Undef : 0;
}
inline unsigned getInternalReadRegState(bool B) {
  return B ? RegState::InternalRead : 0;
}
inline unsigned getDebugRegState(bool B) {
  return B ? RegState::Debug : 0;
}

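// Illustrative sketch (MIB, Reg and the bool IsKill are assumed to be supplied
// by the caller): the getXxxRegState helpers turn a bool into the matching
// RegState flag, and the flags can be or'd together before being passed to
// addReg.
//
//   MIB.addReg(Reg, getKillRegState(IsKill));
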
/// Helper class for constructing bundles of MachineInstrs.
///
/// MIBundleBuilder can create a bundle from scratch by inserting new
/// MachineInstrs one at a time, or it can create a bundle from a sequence of
/// existing MachineInstrs in a basic block.
class MIBundleBuilder {
  MachineBasicBlock &MBB;
  MachineBasicBlock::instr_iterator Begin;
  MachineBasicBlock::instr_iterator End;

public:
  /// Create an MIBundleBuilder that inserts instructions into a new bundle in
  /// BB above the bundle or instruction at Pos.
  MIBundleBuilder(MachineBasicBlock &BB,
                  MachineBasicBlock::iterator Pos)
    : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}

  /// Create a bundle from the sequence of instructions between B and E.
  MIBundleBuilder(MachineBasicBlock &BB,
                  MachineBasicBlock::iterator B,
                  MachineBasicBlock::iterator E)
    : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
    assert(B != E && "No instructions to bundle");
    ++B;
    while (B != E) {
      MachineInstr *MI = B;
      ++B;
      MI->bundleWithPred();
    }
  }

  /// Create an MIBundleBuilder representing an existing instruction or bundle
  /// that has MI as its head.
  explicit MIBundleBuilder(MachineInstr *MI)
    : MBB(*MI->getParent()), Begin(MI), End(getBundleEnd(MI)) {}

  /// Return a reference to the basic block containing this bundle.
  MachineBasicBlock &getMBB() const { return MBB; }

  /// Return true if no instructions have been inserted in this bundle yet.
  /// Empty bundles aren't representable in a MachineBasicBlock.
  bool empty() const { return Begin == End; }

  /// Return an iterator to the first bundled instruction.
  MachineBasicBlock::instr_iterator begin() const { return Begin; }

  /// Return an iterator beyond the last bundled instruction.
  MachineBasicBlock::instr_iterator end() const { return End; }

  /// Insert MI into this bundle before I, which must point to an instruction
  /// in the bundle, or end().
  MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
                          MachineInstr *MI) {
    MBB.insert(I, MI);
    if (I == Begin) {
      if (!empty())
        MI->bundleWithSucc();
      Begin = MI;
      return *this;
    }
    if (I == End) {
      MI->bundleWithPred();
      return *this;
    }
    // MI was inserted in the middle of the bundle, so its neighbors' flags are
    // already fine. Update MI's bundle flags manually.
    MI->setFlag(MachineInstr::BundledPred);
    MI->setFlag(MachineInstr::BundledSucc);
    return *this;
  }

  /// Insert MI into MBB by prepending it to the instructions in the bundle.
  /// MI will become the first instruction in the bundle.
  MIBundleBuilder &prepend(MachineInstr *MI) {
    return insert(begin(), MI);
  }

  /// Insert MI into MBB by appending it to the instructions in the bundle.
  /// MI will become the last instruction in the bundle.
  MIBundleBuilder &append(MachineInstr *MI) {
    return insert(end(), MI);
  }
};
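
// Illustrative sketch (MF, MBB, I, DL, TII and the two opcodes are assumed):
// build a fresh two-instruction bundle in front of iterator I by appending
// unattached instructions created with the MachineFunction-level BuildMI.
//
//   MIBundleBuilder Bundle(MBB, I);
//   Bundle.append(BuildMI(MF, DL, TII.get(FirstOpc)));
//   Bundle.append(BuildMI(MF, DL, TII.get(SecondOpc)));
//
// append/prepend insert the instructions and set their bundle flags; if a
// target also needs the explicit BUNDLE header instruction, see
// finalizeBundle() in MachineInstrBundle.h.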

} // End llvm namespace

#endif
252
thirdparty/clang/include/llvm/CodeGen/MachineInstrBundle.h
vendored
Normal file
252
thirdparty/clang/include/llvm/CodeGen/MachineInstrBundle.h
vendored
Normal file
@@ -0,0 +1,252 @@
//===-- CodeGen/MachineInstrBundle.h - MI bundle utilities ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides utility functions to manipulate machine instruction
// bundles.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H

#include "llvm/CodeGen/MachineBasicBlock.h"

namespace llvm {

/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void finalizeBundle(MachineBasicBlock &MBB,
                    MachineBasicBlock::instr_iterator FirstMI,
                    MachineBasicBlock::instr_iterator LastMI);

/// finalizeBundle - Same functionality as the previous finalizeBundle except
/// the last instruction in the bundle is not provided as an input. This is
/// used in cases where bundles are pre-determined by marking instructions
/// with the 'InsideBundle' marker. It returns the MBB instruction iterator
/// that points to the end of the bundle.
MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
                    MachineBasicBlock::instr_iterator FirstMI);

/// finalizeBundles - Finalize instruction bundles in the specified
/// MachineFunction. Return true if any bundles are finalized.
bool finalizeBundles(MachineFunction &MF);

/// getBundleStart - Returns the first instruction in the bundle containing MI.
///
inline MachineInstr *getBundleStart(MachineInstr *MI) {
  MachineBasicBlock::instr_iterator I = MI;
  while (I->isBundledWithPred())
    --I;
  return I;
}

inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
  MachineBasicBlock::const_instr_iterator I = MI;
  while (I->isBundledWithPred())
    --I;
  return I;
}

/// Return an iterator pointing beyond the bundle containing MI.
inline MachineBasicBlock::instr_iterator
getBundleEnd(MachineInstr *MI) {
  MachineBasicBlock::instr_iterator I = MI;
  while (I->isBundledWithSucc())
    ++I;
  return ++I;
}

/// Return an iterator pointing beyond the bundle containing MI.
inline MachineBasicBlock::const_instr_iterator
getBundleEnd(const MachineInstr *MI) {
  MachineBasicBlock::const_instr_iterator I = MI;
  while (I->isBundledWithSucc())
    ++I;
  return ++I;
}

//===----------------------------------------------------------------------===//
// MachineOperand iterator
//

/// MachineOperandIteratorBase - Iterator that can visit all operands on a
/// MachineInstr, or all operands on a bundle of MachineInstrs. This class is
/// not intended to be used directly, use one of the sub-classes instead.
///
/// Intended use:
///
///   for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
///     if (!MIO->isReg())
///       continue;
///     ...
///   }
///
class MachineOperandIteratorBase {
  MachineBasicBlock::instr_iterator InstrI, InstrE;
  MachineInstr::mop_iterator OpI, OpE;

  // If the operands on InstrI are exhausted, advance InstrI to the next
  // bundled instruction with operands.
  void advance() {
    while (OpI == OpE) {
      // Don't advance off the basic block, or into a new bundle.
      if (++InstrI == InstrE || !InstrI->isInsideBundle())
        break;
      OpI = InstrI->operands_begin();
      OpE = InstrI->operands_end();
    }
  }

protected:
  /// MachineOperandIteratorBase - Create an iterator that visits all operands
  /// on MI, or all operands on every instruction in the bundle containing MI.
  ///
  /// @param MI The instruction to examine.
  /// @param WholeBundle When true, visit all operands on the entire bundle.
  ///
  explicit MachineOperandIteratorBase(MachineInstr *MI, bool WholeBundle) {
    if (WholeBundle) {
      InstrI = getBundleStart(MI);
      InstrE = MI->getParent()->instr_end();
    } else {
      InstrI = InstrE = MI;
      ++InstrE;
    }
    OpI = InstrI->operands_begin();
    OpE = InstrI->operands_end();
    if (WholeBundle)
      advance();
  }

  MachineOperand &deref() const { return *OpI; }

public:
  /// isValid - Returns true until all the operands have been visited.
  bool isValid() const { return OpI != OpE; }

  /// Preincrement. Move to the next operand.
  void operator++() {
    assert(isValid() && "Cannot advance MIOperands beyond the last operand");
    ++OpI;
    advance();
  }

  /// getOperandNo - Returns the number of the current operand relative to its
  /// instruction.
  ///
  unsigned getOperandNo() const {
    return OpI - InstrI->operands_begin();
  }

  /// VirtRegInfo - Information about a virtual register used by a set of
  /// operands.
  ///
  struct VirtRegInfo {
    /// Reads - One of the operands reads the virtual register. This does not
    /// include <undef> or <internal> use operands, see MO::readsReg().
    bool Reads;

    /// Writes - One of the operands writes the virtual register.
    bool Writes;

    /// Tied - Uses and defs must use the same register. This can be because of
    /// a two-address constraint, or there may be a partial redefinition of a
    /// sub-register.
    bool Tied;
  };

  /// PhysRegInfo - Information about a physical register used by a set of
  /// operands.
  struct PhysRegInfo {
    /// Clobbers - Reg or an overlapping register is defined, or a regmask
    /// clobbers Reg.
    bool Clobbers;

    /// Defines - Reg or a super-register is defined.
    bool Defines;

    /// Reads - Reg or a super-register is read.
    bool Reads;

    /// ReadsOverlap - Reg or an overlapping register is read.
    bool ReadsOverlap;

    /// DefinesDead - All defs of a Reg or a super-register are dead.
    bool DefinesDead;

    /// There is a kill of Reg or a super-register.
    bool Kills;
  };

  /// analyzeVirtReg - Analyze how the current instruction or bundle uses a
  /// virtual register. This function should not be called after operator++(),
  /// it expects a fresh iterator.
  ///
  /// @param Reg The virtual register to analyze.
  /// @param Ops When set, this vector will receive an (MI, OpNum) entry for
  ///            each operand referring to Reg.
  /// @returns A filled-in VirtRegInfo struct.
  VirtRegInfo analyzeVirtReg(unsigned Reg,
           SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0);

  /// analyzePhysReg - Analyze how the current instruction or bundle uses a
  /// physical register. This function should not be called after operator++(),
  /// it expects a fresh iterator.
  ///
  /// @param Reg The physical register to analyze.
  /// @returns A filled-in PhysRegInfo struct.
  PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI);
};

/// MIOperands - Iterate over operands of a single instruction.
///
class MIOperands : public MachineOperandIteratorBase {
public:
  MIOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, false) {}
  MachineOperand &operator* () const { return deref(); }
  MachineOperand *operator->() const { return &deref(); }
};

/// ConstMIOperands - Iterate over operands of a single const instruction.
///
class ConstMIOperands : public MachineOperandIteratorBase {
public:
  ConstMIOperands(const MachineInstr *MI)
    : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), false) {}
  const MachineOperand &operator* () const { return deref(); }
  const MachineOperand *operator->() const { return &deref(); }
};

/// MIBundleOperands - Iterate over all operands in a bundle of machine
/// instructions.
///
class MIBundleOperands : public MachineOperandIteratorBase {
public:
  MIBundleOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, true) {}
  MachineOperand &operator* () const { return deref(); }
  MachineOperand *operator->() const { return &deref(); }
};

/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
/// machine instructions.
///
class ConstMIBundleOperands : public MachineOperandIteratorBase {
public:
  ConstMIBundleOperands(const MachineInstr *MI)
    : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), true) {}
  const MachineOperand &operator* () const { return deref(); }
  const MachineOperand *operator->() const { return &deref(); }
};
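
// Illustrative sketch (MI and VirtReg are assumed to be supplied by the
// caller): summarize how the whole bundle containing MI touches one virtual
// register. The iterator is freshly constructed, as analyzeVirtReg requires.
//
//   SmallVector<std::pair<MachineInstr*, unsigned>, 4> Ops;
//   MIBundleOperands::VirtRegInfo RI =
//       MIBundleOperands(MI).analyzeVirtReg(VirtReg, &Ops);
//   if (RI.Writes && !RI.Reads) {
//     // VirtReg is fully redefined by this bundle; Ops lists the operands.
//   }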

} // End llvm namespace

#endif
130
thirdparty/clang/include/llvm/CodeGen/MachineJumpTableInfo.h
vendored
Normal file
130
thirdparty/clang/include/llvm/CodeGen/MachineJumpTableInfo.h
vendored
Normal file
@@ -0,0 +1,130 @@
//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The MachineJumpTableInfo class keeps track of jump tables referenced by
// lowered switch instructions in the MachineFunction.
//
// Instructions reference the address of these jump tables through the use of
// MO_JumpTableIndex values. When emitting assembly or machine code, these
// virtual address references are converted to refer to the address of the
// function jump tables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H

#include <cassert>
#include <vector>

namespace llvm {

class MachineBasicBlock;
class DataLayout;
class raw_ostream;

/// MachineJumpTableEntry - One jump table in the jump table info.
///
struct MachineJumpTableEntry {
  /// MBBs - The vector of basic blocks from which to create the jump table.
  std::vector<MachineBasicBlock*> MBBs;

  explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
    : MBBs(M) {}
};

class MachineJumpTableInfo {
public:
  /// JTEntryKind - This enum indicates how each entry of the jump table is
  /// represented and emitted.
  enum JTEntryKind {
    /// EK_BlockAddress - Each entry is a plain address of a block, e.g.:
    ///     .word LBB123
    EK_BlockAddress,

    /// EK_GPRel64BlockAddress - Each entry is the address of a block, encoded
    /// with a relocation as gp-relative, e.g.:
    ///     .gpdword LBB123
    EK_GPRel64BlockAddress,

    /// EK_GPRel32BlockAddress - Each entry is the address of a block, encoded
    /// with a relocation as gp-relative, e.g.:
    ///     .gprel32 LBB123
    EK_GPRel32BlockAddress,

    /// EK_LabelDifference32 - Each entry is the address of the block minus
    /// the address of the jump table. This is used for PIC jump tables where
    /// gprel32 is not supported, e.g.:
    ///     .word LBB123 - LJTI1_2
    /// If the .set directive is supported, this is emitted as:
    ///     .set L4_5_set_123, LBB123 - LJTI1_2
    ///     .word L4_5_set_123
    EK_LabelDifference32,

    /// EK_Inline - Jump table entries are emitted inline at their point of
    /// use. It is the responsibility of the target to emit the entries.
    EK_Inline,

    /// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by
    /// the TargetLowering::LowerCustomJumpTableEntry hook.
    EK_Custom32
  };
private:
  JTEntryKind EntryKind;
  std::vector<MachineJumpTableEntry> JumpTables;
public:
  explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}

  JTEntryKind getEntryKind() const { return EntryKind; }

  /// getEntrySize - Return the size of each entry in the jump table.
  unsigned getEntrySize(const DataLayout &TD) const;
  /// getEntryAlignment - Return the alignment of each entry in the jump table.
  unsigned getEntryAlignment(const DataLayout &TD) const;

  /// createJumpTableIndex - Create a new jump table.
  ///
  unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);

  /// isEmpty - Return true if there are no jump tables.
  ///
  bool isEmpty() const { return JumpTables.empty(); }

  const std::vector<MachineJumpTableEntry> &getJumpTables() const {
    return JumpTables;
  }

  /// RemoveJumpTable - Mark the specific index as being dead. This will
  /// prevent it from being emitted.
  void RemoveJumpTable(unsigned Idx) {
    JumpTables[Idx].MBBs.clear();
  }

  /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
  /// the jump tables to branch to New instead.
  bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);

  /// ReplaceMBBInJumpTable - If Old is a target of the jump table Idx, update
  /// the jump table to branch to New instead.
  bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old,
                             MachineBasicBlock *New);

  /// print - Used by the MachineFunction printer to print information about
  /// jump tables. Implemented in MachineFunction.cpp.
  ///
  void print(raw_ostream &OS) const;

  /// dump - Call to stderr.
  ///
  void dump() const;
};
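
// Illustrative sketch (MF is an existing MachineFunction and DestBBs an
// already-collected std::vector<MachineBasicBlock*> of switch successors):
//
//   MachineJumpTableInfo *JTI =
//       MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_BlockAddress);
//   unsigned JTIdx = JTI->createJumpTableIndex(DestBBs);
//
// JTIdx is then referenced from instructions through an MO_JumpTableIndex
// operand, e.g. via MachineInstrBuilder::addJumpTableIndex(JTIdx).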

} // End llvm namespace

#endif
190
thirdparty/clang/include/llvm/CodeGen/MachineLoopInfo.h
vendored
Normal file
190
thirdparty/clang/include/llvm/CodeGen/MachineLoopInfo.h
vendored
Normal file
@@ -0,0 +1,190 @@
//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG. Note that
// natural loops may actually be several loops that share the same header node.
//
// This analysis calculates the nesting structure of loops in a function. For
// each natural loop identified, this analysis identifies natural loops
// contained entirely within the loop and the basic blocks that make up the
// loop.
//
// It can calculate on the fly various bits of information, for example:
//
//  * whether there is a preheader for the loop
//  * the number of back edges to the header
//  * whether or not a particular block branches out of the loop
//  * the successor blocks of the loop
//  * the loop depth
//  * the trip count
//  * etc...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINELOOPINFO_H
#define LLVM_CODEGEN_MACHINELOOPINFO_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

namespace llvm {

// Implementation in LoopInfoImpl.h
#ifdef __GNUC__
class MachineLoop;
__extension__ extern template class LoopBase<MachineBasicBlock, MachineLoop>;
#endif

class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
public:
  MachineLoop();

  /// getTopBlock - Return the "top" block in the loop, which is the first
  /// block in the linear layout, ignoring any parts of the loop not
  /// contiguous with the part that contains the header.
  MachineBasicBlock *getTopBlock();

  /// getBottomBlock - Return the "bottom" block in the loop, which is the last
  /// block in the linear layout, ignoring any parts of the loop not
  /// contiguous with the part that contains the header.
  MachineBasicBlock *getBottomBlock();

  void dump() const;

private:
  friend class LoopInfoBase<MachineBasicBlock, MachineLoop>;
  explicit MachineLoop(MachineBasicBlock *MBB)
    : LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
};

// Implementation in LoopInfoImpl.h
#ifdef __GNUC__
__extension__ extern template
class LoopInfoBase<MachineBasicBlock, MachineLoop>;
#endif

class MachineLoopInfo : public MachineFunctionPass {
  LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
  friend class LoopBase<MachineBasicBlock, MachineLoop>;

  void operator=(const MachineLoopInfo &) LLVM_DELETED_FUNCTION;
  MachineLoopInfo(const MachineLoopInfo &) LLVM_DELETED_FUNCTION;

public:
  static char ID; // Pass identification, replacement for typeid

  MachineLoopInfo() : MachineFunctionPass(ID) {
    initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  }

  LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }

  /// iterator/begin/end - The interface to the top-level loops in the current
  /// function.
  ///
  typedef LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator iterator;
  inline iterator begin() const { return LI.begin(); }
  inline iterator end() const { return LI.end(); }
  bool empty() const { return LI.empty(); }

  /// getLoopFor - Return the innermost loop that BB lives in. If a basic
  /// block is in no loop (for example the entry node), null is returned.
  ///
  inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
    return LI.getLoopFor(BB);
  }

  /// operator[] - same as getLoopFor...
  ///
  inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
    return LI.getLoopFor(BB);
  }

  /// getLoopDepth - Return the loop nesting level of the specified block.
  ///
  inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
    return LI.getLoopDepth(BB);
  }

  // isLoopHeader - True if the block is a loop header node.
  inline bool isLoopHeader(MachineBasicBlock *BB) const {
    return LI.isLoopHeader(BB);
  }

  /// runOnMachineFunction - Calculate the natural loop information.
  ///
  virtual bool runOnMachineFunction(MachineFunction &F);

  virtual void releaseMemory() { LI.releaseMemory(); }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  /// removeLoop - This removes the specified top-level loop from this loop
  /// info object. The loop is not deleted, as it will presumably be inserted
  /// into another loop.
  inline MachineLoop *removeLoop(iterator I) { return LI.removeLoop(I); }

  /// changeLoopFor - Change the top-level loop that contains BB to the
  /// specified loop. This should be used by transformations that restructure
  /// the loop hierarchy tree.
  inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
    LI.changeLoopFor(BB, L);
  }

  /// changeTopLevelLoop - Replace the specified loop in the top-level loops
  /// list with the indicated loop.
  inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
    LI.changeTopLevelLoop(OldLoop, NewLoop);
  }

  /// addTopLevelLoop - This adds the specified loop to the collection of
  /// top-level loops.
  inline void addTopLevelLoop(MachineLoop *New) {
    LI.addTopLevelLoop(New);
  }

  /// removeBlock - This method completely removes BB from all data structures,
  /// including all of the Loop objects it is nested in and our mapping from
  /// MachineBasicBlocks to loops.
  void removeBlock(MachineBasicBlock *BB) {
    LI.removeBlock(BB);
  }
};

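// Illustrative sketch from inside some MachineFunctionPass (the pass is
// assumed to have declared AU.addRequired<MachineLoopInfo>() in its
// getAnalysisUsage, and MF is the current MachineFunction):
//
//   MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
//   for (MachineFunction::iterator MBB = MF.begin(); MBB != MF.end(); ++MBB)
//     if (MachineLoop *L = MLI.getLoopFor(&*MBB))
//       (void)L->getLoopDepth(); // e.g. weight spill costs by loop depth
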
// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const MachineLoop*> {
  typedef const MachineLoop NodeType;
  typedef MachineLoopInfo::iterator ChildIteratorType;

  static NodeType *getEntryNode(const MachineLoop *L) { return L; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->end();
  }
};

template <> struct GraphTraits<MachineLoop*> {
  typedef MachineLoop NodeType;
  typedef MachineLoopInfo::iterator ChildIteratorType;

  static NodeType *getEntryNode(MachineLoop *L) { return L; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->end();
  }
};

} // End llvm namespace

#endif
112
thirdparty/clang/include/llvm/CodeGen/MachineLoopRanges.h
vendored
Normal file
112
thirdparty/clang/include/llvm/CodeGen/MachineLoopRanges.h
vendored
Normal file
@@ -0,0 +1,112 @@
//===- MachineLoopRanges.h - Ranges of machine loops -----------*- c++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface to the MachineLoopRanges analysis.
//
// Provide on-demand information about the ranges of machine instructions
// covered by a loop.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINELOOPRANGES_H
#define LLVM_CODEGEN_MACHINELOOPRANGES_H

#include "llvm/ADT/IntervalMap.h"
#include "llvm/CodeGen/SlotIndexes.h"

namespace llvm {

class MachineLoop;
class MachineLoopInfo;
class raw_ostream;

/// MachineLoopRange - Range information for a single loop.
class MachineLoopRange {
  friend class MachineLoopRanges;

public:
  typedef IntervalMap<SlotIndex, unsigned, 4> Map;
  typedef Map::Allocator Allocator;

private:
  /// The mapped loop.
  const MachineLoop *const Loop;

  /// Map intervals to a bit mask.
  /// Bit 0 = inside loop block.
  Map Intervals;

  /// Loop area as measured by SlotIndex::distance.
  unsigned Area;

  /// Create a MachineLoopRange, only accessible to MachineLoopRanges.
  MachineLoopRange(const MachineLoop*, Allocator&, SlotIndexes&);

public:
  /// getLoop - Return the mapped machine loop.
  const MachineLoop *getLoop() const { return Loop; }

  /// overlaps - Return true if this loop overlaps the given range of machine
  /// instructions.
  bool overlaps(SlotIndex Start, SlotIndex Stop);

  /// getNumber - Return the loop number. This is the same as the number of the
  /// header block.
  unsigned getNumber() const;

  /// getArea - Return the loop area. This number is approximately proportional
  /// to the number of instructions in the loop.
  unsigned getArea() const { return Area; }

  /// getMap - Allow public read-only access for IntervalMapOverlaps.
  const Map &getMap() { return Intervals; }

  /// print - Print loop ranges on OS.
  void print(raw_ostream&) const;

  /// byNumber - Comparator for array_pod_sort that sorts a list of
  /// MachineLoopRange pointers by number.
  static int byNumber(const void*, const void*);

  /// byAreaDesc - Comparator for array_pod_sort that sorts a list of
  /// MachineLoopRange pointers by descending area, then by number.
  static int byAreaDesc(const void*, const void*);
};

raw_ostream &operator<<(raw_ostream&, const MachineLoopRange&);

/// MachineLoopRanges - Analysis pass that provides on-demand per-loop range
/// information.
class MachineLoopRanges : public MachineFunctionPass {
  typedef DenseMap<const MachineLoop*, MachineLoopRange*> CacheMap;
  typedef MachineLoopRange::Allocator MapAllocator;

  MapAllocator Allocator;
  SlotIndexes *Indexes;
  CacheMap Cache;

public:
  static char ID; // Pass identification, replacement for typeid

  MachineLoopRanges() : MachineFunctionPass(ID), Indexes(0) {}
  ~MachineLoopRanges() { releaseMemory(); }

  /// getLoopRange - Return the range of the given loop.
  MachineLoopRange *getLoopRange(const MachineLoop *Loop);

private:
  virtual bool runOnMachineFunction(MachineFunction&);
  virtual void releaseMemory();
  virtual void getAnalysisUsage(AnalysisUsage&) const;
};

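// Illustrative sketch (Ranges obtained with getAnalysis<MachineLoopRanges>(),
// Loop from MachineLoopInfo, and Start/Stop are the SlotIndex bounds of a
// candidate live range; all names are assumptions of the example):
//
//   MachineLoopRange *LR = Ranges.getLoopRange(Loop);
//   if (LR->overlaps(Start, Stop)) {
//     // the candidate interval intersects instructions inside this loop
//   }
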
} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINELOOPRANGES_H
183
thirdparty/clang/include/llvm/CodeGen/MachineMemOperand.h
vendored
Normal file
183
thirdparty/clang/include/llvm/CodeGen/MachineMemOperand.h
vendored
Normal file
@@ -0,0 +1,183 @@
//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is
// a description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/Support/DataTypes.h"

namespace llvm {

class Value;
class FoldingSetNodeID;
class MDNode;
class raw_ostream;

/// MachinePointerInfo - This class contains a discriminated union of
/// information about pointers in memory operands, relating them back to LLVM
/// IR or to virtual locations (such as frame indices) that are exposed during
/// codegen.
struct MachinePointerInfo {
  /// V - This is the IR pointer value for the access, or it is null if
  /// unknown. If this is null, then the access is to a pointer in the default
  /// address space.
  const Value *V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  explicit MachinePointerInfo(const Value *v = 0, int64_t offset = 0)
    : V(v), Offset(offset) {}

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V == 0) return MachinePointerInfo(0, 0);
    return MachinePointerInfo(V, Offset+O);
  }

  /// getAddrSpace - Return the LLVM IR address space number that this pointer
  /// points into.
  unsigned getAddrSpace() const;

  /// getConstantPool - Return a MachinePointerInfo record that refers to the
  /// constant pool.
  static MachinePointerInfo getConstantPool();

  /// getFixedStack - Return a MachinePointerInfo record that refers to the
  /// specified FrameIndex.
  static MachinePointerInfo getFixedStack(int FI, int64_t offset = 0);

  /// getJumpTable - Return a MachinePointerInfo record that refers to a
  /// jump table entry.
  static MachinePointerInfo getJumpTable();

  /// getGOT - Return a MachinePointerInfo record that refers to a
  /// GOT entry.
  static MachinePointerInfo getGOT();

  /// getStack - stack pointer relative access.
  static MachinePointerInfo getStack(int64_t Offset);
};


//===----------------------------------------------------------------------===//
/// MachineMemOperand - A description of a memory reference used in the
/// backend. Instead of holding a StoreInst or LoadInst, this class holds the
/// address Value of the reference along with a byte size and offset. This
/// allows it to describe lowered loads and stores. Also, the special
/// PseudoSourceValue objects can be used to represent loads and stores to
/// memory locations that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
  MachinePointerInfo PtrInfo;
  uint64_t Size;
  unsigned Flags;
  const MDNode *TBAAInfo;
  const MDNode *Ranges;

public:
  /// Flags values. These may be or'd together.
  enum MemOperandFlags {
    /// The memory access reads data.
    MOLoad = 1,
    /// The memory access writes data.
    MOStore = 2,
    /// The memory access is volatile.
    MOVolatile = 4,
    /// The memory access is non-temporal.
    MONonTemporal = 8,
    /// The memory access is invariant.
    MOInvariant = 16,
    // This is the number of bits we need to represent flags.
    MOMaxBits = 5
  };

  /// MachineMemOperand - Construct a MachineMemOperand object with the
  /// specified PtrInfo, flags, size, and base alignment.
  MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
                    unsigned base_alignment, const MDNode *TBAAInfo = 0,
                    const MDNode *Ranges = 0);

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// getValue - Return the base address of the memory access. This may either
  /// be a normal LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const { return PtrInfo.V; }

  /// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
  unsigned int getFlags() const { return Flags & ((1 << MOMaxBits) - 1); }

  /// getOffset - For normal values, this is a byte offset added to the base
  /// address. For PseudoSourceValue::FPRel values, this is the FrameIndex
  /// number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  /// getSize - Return the size in bytes of the memory reference.
  uint64_t getSize() const { return Size; }

  /// getAlignment - Return the minimum known alignment in bytes of the
  /// actual memory reference.
  uint64_t getAlignment() const;

  /// getBaseAlignment - Return the minimum known alignment in bytes of the
  /// base address, without the offset.
  uint64_t getBaseAlignment() const { return (1u << (Flags >> MOMaxBits)) >> 1; }

  /// getTBAAInfo - Return the TBAA tag for the memory reference.
  const MDNode *getTBAAInfo() const { return TBAAInfo; }

  /// getRanges - Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  bool isLoad() const { return Flags & MOLoad; }
  bool isStore() const { return Flags & MOStore; }
  bool isVolatile() const { return Flags & MOVolatile; }
  bool isNonTemporal() const { return Flags & MONonTemporal; }
  bool isInvariant() const { return Flags & MOInvariant; }

  /// isUnordered - Returns true if this memory operation doesn't have any
  /// ordering constraints other than normal aliasing. Volatile and atomic
  /// memory operations can't be reordered.
  ///
  /// Currently, we don't model the difference between volatile and atomic
  /// operations. They should retain their ordering relative to all memory
  /// operations.
  bool isUnordered() const { return !isVolatile(); }

  /// refineAlignment - Update this MachineMemOperand to reflect the alignment
  /// of MMO, if it has a greater alignment. This must only be used when the
  /// new alignment applies to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// setValue - Change the SourceValue for this MachineMemOperand. This
  /// should only be used when an object is being relocated and all references
  /// to it are being updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Profile - Gather unique data for the object.
  ///
  void Profile(FoldingSetNodeID &ID) const;
};
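
// Illustrative sketch (MF, LoadPtr, Size, Alignment and the builder MIB are
// assumptions of the example): describe a plain load from an IR pointer so
// later passes can reason about aliasing, then attach it to an instruction.
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo(LoadPtr), MachineMemOperand::MOLoad, Size,
//       Alignment);
//   MIB.addMemOperand(MMO);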

raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO);

} // End llvm namespace

#endif
405
thirdparty/clang/include/llvm/CodeGen/MachineModuleInfo.h
vendored
Normal file
405
thirdparty/clang/include/llvm/CodeGen/MachineModuleInfo.h
vendored
Normal file
@@ -0,0 +1,405 @@
//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect meta information for a module. This information should be in a
// neutral form that can be used by different debugging and exception handling
// schemes.
//
// The organization of information is primarily clustered around the source
// compile units. The main exception is source line correspondence where
// inlining may interleave code from various compile units.
//
// The following information can be retrieved from the MachineModuleInfo.
//
// -- Source directories - Directories are uniqued based on their canonical
//    string and assigned a sequential numeric ID (base 1).
// -- Source files - Files are also uniqued based on their name and directory
//    ID. A file ID is a sequential number (base 1).
// -- Source line correspondence - A vector of file ID, line#, column# triples.
//    A DEBUG_LOCATION instruction is generated by the DAG Legalizer
//    corresponding to each entry in the source line list. This allows a debug
//    emitter to generate labels referenced by debug information tables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
#define LLVM_CODEGEN_MACHINEMODULEINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Metadata.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ValueHandle.h"

namespace llvm {

//===----------------------------------------------------------------------===//
// Forward declarations.
class Constant;
class GlobalVariable;
class MDNode;
class MMIAddrLabelMap;
class MachineBasicBlock;
class MachineFunction;
class Module;
class PointerType;
class StructType;

//===----------------------------------------------------------------------===//
/// LandingPadInfo - This structure is used to retain landing pad info for
/// the current function.
///
struct LandingPadInfo {
  MachineBasicBlock *LandingPadBlock;    // Landing pad block.
  SmallVector<MCSymbol*, 1> BeginLabels; // Labels prior to invoke.
  SmallVector<MCSymbol*, 1> EndLabels;   // Labels after invoke.
  MCSymbol *LandingPadLabel;             // Label at beginning of landing pad.
  const Function *Personality;           // Personality function.
  std::vector<int> TypeIds;              // List of type ids (filters negative).

  explicit LandingPadInfo(MachineBasicBlock *MBB)
    : LandingPadBlock(MBB), LandingPadLabel(0), Personality(0) {}
};

//===----------------------------------------------------------------------===//
/// MachineModuleInfoImpl - This class can be derived from and used by targets
/// to hold private target-specific information for each Module. Objects of
/// this type are accessed/created with MMI::getObjFileInfo and destroyed when
/// the MachineModuleInfo is destroyed.
///
class MachineModuleInfoImpl {
public:
  typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
  virtual ~MachineModuleInfoImpl();
  typedef std::vector<std::pair<MCSymbol*, StubValueTy> > SymbolListTy;
protected:
  static SymbolListTy GetSortedStubs(const DenseMap<MCSymbol*, StubValueTy>&);
};

//===----------------------------------------------------------------------===//
/// MachineModuleInfo - This class contains meta information specific to a
/// module. Queries can be made by different debugging and exception handling
/// schemes and reformatted for specific use.
///
class MachineModuleInfo : public ImmutablePass {
  /// Context - This is the MCContext used for the entire code generator.
  MCContext Context;

  /// TheModule - This is the LLVM Module being worked on.
  const Module *TheModule;

  /// ObjFileMMI - This is the object-file-format-specific implementation of
  /// MachineModuleInfoImpl, which lets targets accumulate whatever info they
  /// want.
  MachineModuleInfoImpl *ObjFileMMI;

  /// FrameMoves - List of moves done by a function's prolog. Used to construct
  /// frame maps by debug and exception handling consumers.
  std::vector<MachineMove> FrameMoves;

  /// CompactUnwindEncoding - If the target supports it, this is the compact
  /// unwind encoding. It replaces a function's CIE and FDE.
  uint32_t CompactUnwindEncoding;

  /// LandingPads - List of LandingPadInfo describing the landing pad
  /// information in the current function.
  std::vector<LandingPadInfo> LandingPads;

  /// LPadToCallSiteMap - Map a landing pad's EH symbol to the call site
  /// indexes.
  DenseMap<MCSymbol*, SmallVector<unsigned, 4> > LPadToCallSiteMap;

  /// CallSiteMap - Map of invoke call site index values to associated begin
  /// EH_LABEL for the current function.
  DenseMap<MCSymbol*, unsigned> CallSiteMap;

  /// CurCallSite - The current call site index being processed, if any. 0 if
  /// none.
  unsigned CurCallSite;

  /// TypeInfos - List of C++ TypeInfo used in the current function.
  std::vector<const GlobalVariable *> TypeInfos;

  /// FilterIds - List of typeids encoding filters used in the current
  /// function.
  std::vector<unsigned> FilterIds;

  /// FilterEnds - List of the indices in FilterIds corresponding to filter
  /// terminators.
  std::vector<unsigned> FilterEnds;

  /// Personalities - Vector of all personality functions ever seen. Used to
  /// emit common EH frames.
  std::vector<const Function *> Personalities;

  /// UsedFunctions - The functions in the @llvm.used list in a more easily
  /// searchable format. This does not include the functions in
  /// llvm.compiler.used.
  SmallPtrSet<const Function *, 32> UsedFunctions;

  /// AddrLabelSymbols - This map keeps track of which symbol is being used for
  /// the specified basic block's address of label.
  MMIAddrLabelMap *AddrLabelSymbols;

  bool CallsEHReturn;
  bool CallsUnwindInit;

  /// DbgInfoAvailable - True if debugging information is available
  /// in this module.
  bool DbgInfoAvailable;

  /// UsesVAFloatArgument - True if this module calls VarArg function with
  /// floating-point arguments. This is used to emit an undefined reference
  /// to _fltused on Windows targets.
  bool UsesVAFloatArgument;

public:
  static char ID; // Pass identification, replacement for typeid

  typedef std::pair<unsigned, DebugLoc> UnsignedDebugLocPair;
  typedef SmallVector<std::pair<TrackingVH<MDNode>, UnsignedDebugLocPair>, 4>
    VariableDbgInfoMapTy;
  VariableDbgInfoMapTy VariableDbgInfo;

  MachineModuleInfo();  // DUMMY CONSTRUCTOR, DO NOT CALL.
  // Real constructor.
  MachineModuleInfo(const MCAsmInfo &MAI, const MCRegisterInfo &MRI,
                    const MCObjectFileInfo *MOFI);
  ~MachineModuleInfo();

  // Initialization and Finalization
  virtual bool doInitialization(Module &);
  virtual bool doFinalization(Module &);

  /// EndFunction - Discard function meta information.
  ///
  void EndFunction();

  const MCContext &getContext() const { return Context; }
  MCContext &getContext() { return Context; }

  void setModule(const Module *M) { TheModule = M; }
  const Module *getModule() const { return TheModule; }

  /// getObjFileInfo - Keep track of various per-function pieces of information
  /// for backends that would like to do so.
  ///
  template<typename Ty>
  Ty &getObjFileInfo() {
    if (ObjFileMMI == 0)
      ObjFileMMI = new Ty(*this);
    return *static_cast<Ty*>(ObjFileMMI);
  }

  template<typename Ty>
  const Ty &getObjFileInfo() const {
    return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
  }
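
  // Illustrative sketch of the getObjFileInfo accessor above (assumption:
  // MachineModuleInfoMachO is the Mach-O flavored MachineModuleInfoImpl
  // subclass declared in llvm/CodeGen/MachineModuleInfoImpls.h, and MMI is a
  // MachineModuleInfo reference available to the caller):
  //
  //   MachineModuleInfoMachO &MMIMachO =
  //       MMI.getObjFileInfo<MachineModuleInfoMachO>();
  //
  // The target-specific object is created lazily on first use and is deleted
  // together with the owning MachineModuleInfo.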
/// AnalyzeModule - Scan the module for global debug information.
|
||||
///
|
||||
void AnalyzeModule(const Module &M);
|
||||
|
||||
/// hasDebugInfo - Returns true if valid debug info is present.
|
||||
///
|
||||
bool hasDebugInfo() const { return DbgInfoAvailable; }
|
||||
void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
|
||||
|
||||
bool callsEHReturn() const { return CallsEHReturn; }
|
||||
void setCallsEHReturn(bool b) { CallsEHReturn = b; }
|
||||
|
||||
bool callsUnwindInit() const { return CallsUnwindInit; }
|
||||
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
|
||||
|
||||
bool usesVAFloatArgument() const {
|
||||
return UsesVAFloatArgument;
|
||||
}
|
||||
|
||||
void setUsesVAFloatArgument(bool b) {
|
||||
UsesVAFloatArgument = b;
|
||||
}
|
||||
|
||||
/// getFrameMoves - Returns a reference to a list of moves done in the current
|
||||
/// function's prologue. Used to construct frame maps for debug and exception
|
||||
/// handling comsumers.
|
||||
std::vector<MachineMove> &getFrameMoves() { return FrameMoves; }
|
||||
|
||||
/// getCompactUnwindEncoding - Returns the compact unwind encoding for a
|
||||
/// function if the target supports the encoding. This encoding replaces a
|
||||
/// function's CIE and FDE.
|
||||
uint32_t getCompactUnwindEncoding() const { return CompactUnwindEncoding; }
|
||||
|
||||
/// setCompactUnwindEncoding - Set the compact unwind encoding for a function
|
||||
/// if the target supports the encoding.
|
||||
void setCompactUnwindEncoding(uint32_t Enc) { CompactUnwindEncoding = Enc; }
|
||||
|
||||
/// getAddrLabelSymbol - Return the symbol to be used for the specified basic
|
||||
/// block when its address is taken. This cannot be its normal LBB label
|
||||
/// because the block may be accessed outside its containing function.
|
||||
MCSymbol *getAddrLabelSymbol(const BasicBlock *BB);
|
||||
|
||||
/// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
|
||||
/// basic block when its address is taken. If other blocks were RAUW'd to
|
||||
/// this one, we may have to emit them as well, return the whole set.
|
||||
std::vector<MCSymbol*> getAddrLabelSymbolToEmit(const BasicBlock *BB);
|
||||
|
||||
/// takeDeletedSymbolsForFunction - If the specified function has had any
|
||||
/// references to address-taken blocks generated, but the block got deleted,
|
||||
/// return the symbol now so we can emit it. This prevents emitting a
|
||||
/// reference to a symbol that has no definition.
|
||||
void takeDeletedSymbolsForFunction(const Function *F,
|
||||
std::vector<MCSymbol*> &Result);
|
||||
|
||||
|
||||
//===- EH ---------------------------------------------------------------===//
|
||||
|
||||
/// getOrCreateLandingPadInfo - Find or create an LandingPadInfo for the
|
||||
/// specified MachineBasicBlock.
|
||||
LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
|
||||
|
||||
/// addInvoke - Provide the begin and end labels of an invoke style call and
|
||||
/// associate it with a try landing pad block.
|
||||
void addInvoke(MachineBasicBlock *LandingPad,
|
||||
MCSymbol *BeginLabel, MCSymbol *EndLabel);
|
||||
|
||||
/// addLandingPad - Add a new panding pad. Returns the label ID for the
|
||||
/// landing pad entry.
|
||||
MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
|
||||
|
||||
/// addPersonality - Provide the personality function for the exception
|
||||
/// information.
|
||||
void addPersonality(MachineBasicBlock *LandingPad,
|
||||
const Function *Personality);
|
||||
|
||||
/// getPersonalityIndex - Get index of the current personality function inside
|
||||
/// Personalitites array
|
||||
unsigned getPersonalityIndex() const;
|
||||
|
||||
/// getPersonalities - Return array of personality functions ever seen.
|
||||
const std::vector<const Function *>& getPersonalities() const {
|
||||
return Personalities;
|
||||
}
|
||||
|
||||
/// isUsedFunction - Return true if the function is in the llvm.used list. This
/// does not return true for things in llvm.compiler.used unless they are also
/// in llvm.used.
|
||||
bool isUsedFunction(const Function *F) const {
|
||||
return UsedFunctions.count(F);
|
||||
}
|
||||
|
||||
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
|
||||
///
|
||||
void addCatchTypeInfo(MachineBasicBlock *LandingPad,
|
||||
ArrayRef<const GlobalVariable *> TyInfo);
|
||||
|
||||
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
|
||||
///
|
||||
void addFilterTypeInfo(MachineBasicBlock *LandingPad,
|
||||
ArrayRef<const GlobalVariable *> TyInfo);
|
||||
|
||||
/// addCleanup - Add a cleanup action for a landing pad.
|
||||
///
|
||||
void addCleanup(MachineBasicBlock *LandingPad);
|
||||
|
||||
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
|
||||
/// function wide.
|
||||
unsigned getTypeIDFor(const GlobalVariable *TI);
|
||||
|
||||
/// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
|
||||
/// function wide.
|
||||
int getFilterIDFor(std::vector<unsigned> &TyIds);
|
||||
|
||||
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
|
||||
/// pads.
|
||||
void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = 0);
|
||||
|
||||
/// getLandingPads - Return a reference to the landing pad info for the
|
||||
/// current function.
|
||||
const std::vector<LandingPadInfo> &getLandingPads() const {
|
||||
return LandingPads;
|
||||
}
|
||||
|
||||
/// setCallSiteLandingPad - Map the landing pad's EH symbol to the call
|
||||
/// site indexes.
|
||||
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
|
||||
|
||||
/// getCallSiteLandingPad - Get the call site indexes for a landing pad EH
|
||||
/// symbol.
|
||||
SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
|
||||
assert(hasCallSiteLandingPad(Sym) &&
|
||||
"missing call site number for landing pad!");
|
||||
return LPadToCallSiteMap[Sym];
|
||||
}
|
||||
|
||||
/// hasCallSiteLandingPad - Return true if the landing pad EH symbol has an
/// associated call site.
|
||||
bool hasCallSiteLandingPad(MCSymbol *Sym) {
|
||||
return !LPadToCallSiteMap[Sym].empty();
|
||||
}
|
||||
|
||||
/// setCallSiteBeginLabel - Map the begin label for a call site.
|
||||
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
|
||||
CallSiteMap[BeginLabel] = Site;
|
||||
}
|
||||
|
||||
/// getCallSiteBeginLabel - Get the call site number for a begin label.
|
||||
unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) {
|
||||
assert(hasCallSiteBeginLabel(BeginLabel) &&
|
||||
"Missing call site number for EH_LABEL!");
|
||||
return CallSiteMap[BeginLabel];
|
||||
}
|
||||
|
||||
/// hasCallSiteBeginLabel - Return true if the begin label has a call site
|
||||
/// number associated with it.
|
||||
bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) {
|
||||
return CallSiteMap[BeginLabel] != 0;
|
||||
}
|
||||
|
||||
/// setCurrentCallSite - Set the call site currently being processed.
|
||||
void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
|
||||
|
||||
/// getCurrentCallSite - Get the call site currently being processed, if any.
/// Returns zero if none.
|
||||
unsigned getCurrentCallSite() { return CurCallSite; }
|
||||
|
||||
/// getTypeInfos - Return a reference to the C++ typeinfo for the current
|
||||
/// function.
|
||||
const std::vector<const GlobalVariable *> &getTypeInfos() const {
|
||||
return TypeInfos;
|
||||
}
|
||||
|
||||
/// getFilterIds - Return a reference to the typeids encoding filters used in
|
||||
/// the current function.
|
||||
const std::vector<unsigned> &getFilterIds() const {
|
||||
return FilterIds;
|
||||
}
|
||||
|
||||
/// getPersonality - Return a personality function if available. The presence
|
||||
/// of one is required to emit exception handling info.
|
||||
const Function *getPersonality() const;
|
||||
|
||||
/// setVariableDbgInfo - Collect information used to emit debugging
|
||||
/// information of a variable.
|
||||
void setVariableDbgInfo(MDNode *N, unsigned Slot, DebugLoc Loc) {
|
||||
VariableDbgInfo.push_back(std::make_pair(N, std::make_pair(Slot, Loc)));
|
||||
}
|
||||
|
||||
VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfo; }
|
||||
|
||||
}; // End class MachineModuleInfo
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
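// Illustrative usage sketch (hypothetical helper; MMI, Pad and TypeInfo stand
// in for the caller's own objects): how a lowering pass might record a catch
// clause with the EH bookkeeping interfaces declared above.
#include "llvm/CodeGen/MachineModuleInfo.h"

static void recordCatchClause(llvm::MachineModuleInfo &MMI,
                              llvm::MachineBasicBlock *Pad,
                              const llvm::GlobalVariable *TypeInfo) {
  // Create (or find) the LandingPadInfo record and get back its EH label.
  llvm::MCSymbol *PadLabel = MMI.addLandingPad(Pad);
  (void)PadLabel;

  // A single typeinfo converts implicitly to a one-element ArrayRef.
  MMI.addCatchTypeInfo(Pad, TypeInfo);

  // The id used by the exception-table emitter is function wide.
  unsigned TypeID = MMI.getTypeIDFor(TypeInfo);
  (void)TypeID;
}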
|
||||
97
thirdparty/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
//===-- llvm/CodeGen/MachineModuleInfoImpls.h -------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines object-file format specific implementations of
|
||||
// MachineModuleInfoImpl.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
|
||||
#define LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
|
||||
|
||||
#include "llvm/CodeGen/MachineModuleInfo.h"
|
||||
|
||||
namespace llvm {
|
||||
class MCSymbol;
|
||||
|
||||
/// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation
|
||||
/// for MachO targets.
|
||||
class MachineModuleInfoMachO : public MachineModuleInfoImpl {
|
||||
/// FnStubs - Darwin '$stub' stubs. The key is something like "Lfoo$stub",
|
||||
/// the value is something like "_foo".
|
||||
DenseMap<MCSymbol*, StubValueTy> FnStubs;
|
||||
|
||||
/// GVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
|
||||
/// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
|
||||
/// is true if this GV is external.
|
||||
DenseMap<MCSymbol*, StubValueTy> GVStubs;
|
||||
|
||||
/// HiddenGVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
|
||||
/// "Lfoo$non_lazy_ptr", the value is something like "_foo". Unlike GVStubs
|
||||
/// these are for things with hidden visibility. The extra bit is true if
|
||||
/// this GV is external.
|
||||
DenseMap<MCSymbol*, StubValueTy> HiddenGVStubs;
|
||||
|
||||
virtual void anchor(); // Out of line virtual method.
|
||||
public:
|
||||
MachineModuleInfoMachO(const MachineModuleInfo &) {}
|
||||
|
||||
StubValueTy &getFnStubEntry(MCSymbol *Sym) {
|
||||
assert(Sym && "Key cannot be null");
|
||||
return FnStubs[Sym];
|
||||
}
|
||||
|
||||
StubValueTy &getGVStubEntry(MCSymbol *Sym) {
|
||||
assert(Sym && "Key cannot be null");
|
||||
return GVStubs[Sym];
|
||||
}
|
||||
|
||||
StubValueTy &getHiddenGVStubEntry(MCSymbol *Sym) {
|
||||
assert(Sym && "Key cannot be null");
|
||||
return HiddenGVStubs[Sym];
|
||||
}
|
||||
|
||||
/// Accessor methods to return the set of stubs in sorted order.
|
||||
SymbolListTy GetFnStubList() const {
|
||||
return GetSortedStubs(FnStubs);
|
||||
}
|
||||
SymbolListTy GetGVStubList() const {
|
||||
return GetSortedStubs(GVStubs);
|
||||
}
|
||||
SymbolListTy GetHiddenGVStubList() const {
|
||||
return GetSortedStubs(HiddenGVStubs);
|
||||
}
|
||||
};
|
||||
|
||||
/// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation
|
||||
/// for ELF targets.
|
||||
class MachineModuleInfoELF : public MachineModuleInfoImpl {
|
||||
/// GVStubs - These stubs are used to materialize global addresses in PIC
|
||||
/// mode.
|
||||
DenseMap<MCSymbol*, StubValueTy> GVStubs;
|
||||
|
||||
virtual void anchor(); // Out of line virtual method.
|
||||
public:
|
||||
MachineModuleInfoELF(const MachineModuleInfo &) {}
|
||||
|
||||
StubValueTy &getGVStubEntry(MCSymbol *Sym) {
|
||||
assert(Sym && "Key cannot be null");
|
||||
return GVStubs[Sym];
|
||||
}
|
||||
|
||||
/// Accessor methods to return the set of stubs in sorted order.
|
||||
|
||||
SymbolListTy GetGVStubList() const {
|
||||
return GetSortedStubs(GVStubs);
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
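// Illustrative sketch (hypothetical helper). It assumes the usual
// MachineModuleInfo::getObjFileInfo<Ty>() accessor, which is declared in
// MachineModuleInfo.h rather than in this file.
#include "llvm/CodeGen/MachineModuleInfoImpls.h"

static void noteGVStub(llvm::MachineModuleInfo &MMI,
                       llvm::MCSymbol *StubSym, llvm::MCSymbol *Target,
                       bool IsExternal) {
  llvm::MachineModuleInfoMachO &MachOMMI =
      MMI.getObjFileInfo<llvm::MachineModuleInfoMachO>();

  // The map entry is created on first access; the extra bit records whether
  // the referenced global is external.
  MachOMMI.getGVStubEntry(StubSym) =
      llvm::MachineModuleInfoImpl::StubValueTy(Target, IsExternal);

  // At emission time the stubs come back sorted by symbol name.
  llvm::MachineModuleInfoImpl::SymbolListTy Stubs = MachOMMI.GetGVStubList();
  (void)Stubs;
}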
|
||||
698
thirdparty/clang/include/llvm/CodeGen/MachineOperand.h
vendored
Normal file
@@ -0,0 +1,698 @@
|
||||
//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file contains the declaration of the MachineOperand class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
|
||||
#define LLVM_CODEGEN_MACHINEOPERAND_H
|
||||
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include <cassert>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class BlockAddress;
|
||||
class ConstantFP;
|
||||
class ConstantInt;
|
||||
class GlobalValue;
|
||||
class MachineBasicBlock;
|
||||
class MachineInstr;
|
||||
class MachineRegisterInfo;
|
||||
class MDNode;
|
||||
class TargetMachine;
|
||||
class TargetRegisterInfo;
|
||||
class hash_code;
|
||||
class raw_ostream;
|
||||
class MCSymbol;
|
||||
|
||||
/// MachineOperand class - Representation of each machine instruction operand.
|
||||
///
|
||||
/// This class isn't a POD type because it has a private constructor, but its
|
||||
/// destructor must be trivial. Functions like MachineInstr::addOperand(),
|
||||
/// MachineRegisterInfo::moveOperands(), and MF::DeleteMachineInstr() depend on
|
||||
/// not having to call the MachineOperand destructor.
|
||||
///
|
||||
class MachineOperand {
|
||||
public:
|
||||
enum MachineOperandType {
|
||||
MO_Register, ///< Register operand.
|
||||
MO_Immediate, ///< Immediate operand
|
||||
MO_CImmediate, ///< Immediate >64bit operand
|
||||
MO_FPImmediate, ///< Floating-point immediate operand
|
||||
MO_MachineBasicBlock, ///< MachineBasicBlock reference
|
||||
MO_FrameIndex, ///< Abstract Stack Frame Index
|
||||
MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
|
||||
MO_TargetIndex, ///< Target-dependent index+offset operand.
|
||||
MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
|
||||
MO_ExternalSymbol, ///< Name of external global symbol
|
||||
MO_GlobalAddress, ///< Address of a global value
|
||||
MO_BlockAddress, ///< Address of a basic block
|
||||
MO_RegisterMask, ///< Mask of preserved registers.
|
||||
MO_Metadata, ///< Metadata reference (for debug info)
|
||||
MO_MCSymbol ///< MCSymbol reference (for debug/eh info)
|
||||
};
|
||||
|
||||
private:
|
||||
/// OpKind - Specify what kind of operand this is. This discriminates the
|
||||
/// union.
|
||||
unsigned char OpKind; // MachineOperandType
|
||||
|
||||
/// Subregister number for MO_Register. A value of 0 indicates the
|
||||
/// MO_Register has no subReg.
|
||||
///
|
||||
/// For all other kinds of operands, this field holds target-specific flags.
|
||||
unsigned SubReg_TargetFlags : 12;
|
||||
|
||||
/// TiedTo - Non-zero when this register operand is tied to another register
|
||||
/// operand. The encoding of this field is described in the block comment
|
||||
/// before MachineInstr::tieOperands().
|
||||
unsigned char TiedTo : 4;
|
||||
|
||||
/// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
|
||||
/// operands.
|
||||
|
||||
/// IsDef - True if this is a def, false if this is a use of the register.
|
||||
///
|
||||
bool IsDef : 1;
|
||||
|
||||
/// IsImp - True if this is an implicit def or use, false if it is explicit.
|
||||
///
|
||||
bool IsImp : 1;
|
||||
|
||||
/// IsKill - True if this instruction is the last use of the register on this
|
||||
/// path through the function. This is only valid on uses of registers.
|
||||
bool IsKill : 1;
|
||||
|
||||
/// IsDead - True if this register is never used by a subsequent instruction.
|
||||
/// This is only valid on definitions of registers.
|
||||
bool IsDead : 1;
|
||||
|
||||
/// IsUndef - True if this register operand reads an "undef" value, i.e. the
|
||||
/// read value doesn't matter. This flag can be set on both use and def
|
||||
/// operands. On a sub-register def operand, it refers to the part of the
|
||||
/// register that isn't written. On a full-register def operand, it is a
|
||||
/// noop. See readsReg().
|
||||
///
|
||||
/// This is only valid on registers.
|
||||
///
|
||||
/// Note that an instruction may have multiple <undef> operands referring to
|
||||
/// the same register. In that case, the instruction may depend on those
|
||||
/// operands reading the same dont-care value. For example:
|
||||
///
|
||||
/// %vreg1<def> = XOR %vreg2<undef>, %vreg2<undef>
|
||||
///
|
||||
/// Any register can be used for %vreg2, and its value doesn't matter, but
|
||||
/// the two operands must be the same register.
|
||||
///
|
||||
bool IsUndef : 1;
|
||||
|
||||
/// IsInternalRead - True if this operand reads a value that was defined
|
||||
/// inside the same instruction or bundle. This flag can be set on both use
|
||||
/// and def operands. On a sub-register def operand, it refers to the part
|
||||
/// of the register that isn't written. On a full-register def operand, it
|
||||
/// is a noop.
|
||||
///
|
||||
/// When this flag is set, the instruction bundle must contain at least one
|
||||
/// other def of the register. If multiple instructions in the bundle define
|
||||
/// the register, the meaning is target-defined.
|
||||
bool IsInternalRead : 1;
|
||||
|
||||
/// IsEarlyClobber - True if this MO_Register 'def' operand is written to
|
||||
/// by the MachineInstr before all input registers are read. This is used to
|
||||
/// model the GCC inline asm '&' constraint modifier.
|
||||
bool IsEarlyClobber : 1;
|
||||
|
||||
/// IsDebug - True if this MO_Register 'use' operand is in a debug pseudo,
|
||||
/// not a real instruction. Such uses should be ignored during codegen.
|
||||
bool IsDebug : 1;
|
||||
|
||||
/// SmallContents - This really should be part of the Contents union, but
|
||||
/// lives out here so we can get a better packed struct.
|
||||
/// MO_Register: Register number.
|
||||
/// OffsetedInfo: Low bits of offset.
|
||||
union {
|
||||
unsigned RegNo; // For MO_Register.
|
||||
unsigned OffsetLo; // Matches Contents.OffsetedInfo.OffsetHi.
|
||||
} SmallContents;
|
||||
|
||||
/// ParentMI - This is the instruction that this operand is embedded into.
|
||||
/// This is valid for all operand types, when the operand is in an instr.
|
||||
MachineInstr *ParentMI;
|
||||
|
||||
/// Contents union - This contains the payload for the various operand types.
|
||||
union {
|
||||
MachineBasicBlock *MBB; // For MO_MachineBasicBlock.
|
||||
const ConstantFP *CFP; // For MO_FPImmediate.
|
||||
const ConstantInt *CI; // For MO_CImmediate. Integers > 64bit.
|
||||
int64_t ImmVal; // For MO_Immediate.
|
||||
const uint32_t *RegMask; // For MO_RegisterMask.
|
||||
const MDNode *MD; // For MO_Metadata.
|
||||
MCSymbol *Sym; // For MO_MCSymbol
|
||||
|
||||
struct { // For MO_Register.
|
||||
// Register number is in SmallContents.RegNo.
|
||||
MachineOperand *Prev; // Access list for register. See MRI.
|
||||
MachineOperand *Next;
|
||||
} Reg;
|
||||
|
||||
/// OffsetedInfo - This struct contains the offset and an object identifier.
/// Together they represent an object plus an optional offset from it.
|
||||
struct {
|
||||
union {
|
||||
int Index; // For MO_*Index - The index itself.
|
||||
const char *SymbolName; // For MO_ExternalSymbol.
|
||||
const GlobalValue *GV; // For MO_GlobalAddress.
|
||||
const BlockAddress *BA; // For MO_BlockAddress.
|
||||
} Val;
|
||||
// Low bits of offset are in SmallContents.OffsetLo.
|
||||
int OffsetHi; // An offset from the object, high 32 bits.
|
||||
} OffsetedInfo;
|
||||
} Contents;
|
||||
|
||||
explicit MachineOperand(MachineOperandType K)
|
||||
: OpKind(K), SubReg_TargetFlags(0), ParentMI(0) {}
|
||||
public:
|
||||
/// getType - Returns the MachineOperandType for this operand.
|
||||
///
|
||||
MachineOperandType getType() const { return (MachineOperandType)OpKind; }
|
||||
|
||||
unsigned getTargetFlags() const {
|
||||
return isReg() ? 0 : SubReg_TargetFlags;
|
||||
}
|
||||
void setTargetFlags(unsigned F) {
|
||||
assert(!isReg() && "Register operands can't have target flags");
|
||||
SubReg_TargetFlags = F;
|
||||
assert(SubReg_TargetFlags == F && "Target flags out of range");
|
||||
}
|
||||
void addTargetFlag(unsigned F) {
|
||||
assert(!isReg() && "Register operands can't have target flags");
|
||||
SubReg_TargetFlags |= F;
|
||||
assert((SubReg_TargetFlags & F) && "Target flags out of range");
|
||||
}
|
||||
|
||||
|
||||
/// getParent - Return the instruction that this operand belongs to.
|
||||
///
|
||||
MachineInstr *getParent() { return ParentMI; }
|
||||
const MachineInstr *getParent() const { return ParentMI; }
|
||||
|
||||
/// clearParent - Reset the parent pointer.
|
||||
///
|
||||
/// The MachineOperand copy constructor also copies ParentMI, expecting the
|
||||
/// original to be deleted. If a MachineOperand is ever stored outside a
|
||||
/// MachineInstr, the parent pointer must be cleared.
|
||||
///
|
||||
/// Never call clearParent() on an operand in a MachineInstr.
|
||||
///
|
||||
void clearParent() { ParentMI = 0; }
|
||||
|
||||
void print(raw_ostream &os, const TargetMachine *TM = 0) const;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Accessors that tell you what kind of MachineOperand you're looking at.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// isReg - Tests if this is a MO_Register operand.
|
||||
bool isReg() const { return OpKind == MO_Register; }
|
||||
/// isImm - Tests if this is a MO_Immediate operand.
|
||||
bool isImm() const { return OpKind == MO_Immediate; }
|
||||
/// isCImm - Tests if this is a MO_CImmediate operand.
|
||||
bool isCImm() const { return OpKind == MO_CImmediate; }
|
||||
/// isFPImm - Tests if this is a MO_FPImmediate operand.
|
||||
bool isFPImm() const { return OpKind == MO_FPImmediate; }
|
||||
/// isMBB - Tests if this is a MO_MachineBasicBlock operand.
|
||||
bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
|
||||
/// isFI - Tests if this is a MO_FrameIndex operand.
|
||||
bool isFI() const { return OpKind == MO_FrameIndex; }
|
||||
/// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
|
||||
bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
|
||||
/// isTargetIndex - Tests if this is a MO_TargetIndex operand.
|
||||
bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
|
||||
/// isJTI - Tests if this is a MO_JumpTableIndex operand.
|
||||
bool isJTI() const { return OpKind == MO_JumpTableIndex; }
|
||||
/// isGlobal - Tests if this is a MO_GlobalAddress operand.
|
||||
bool isGlobal() const { return OpKind == MO_GlobalAddress; }
|
||||
/// isSymbol - Tests if this is a MO_ExternalSymbol operand.
|
||||
bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
|
||||
/// isBlockAddress - Tests if this is a MO_BlockAddress operand.
|
||||
bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
|
||||
/// isRegMask - Tests if this is a MO_RegisterMask operand.
|
||||
bool isRegMask() const { return OpKind == MO_RegisterMask; }
|
||||
/// isMetadata - Tests if this is a MO_Metadata operand.
|
||||
bool isMetadata() const { return OpKind == MO_Metadata; }
|
||||
bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
|
||||
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Accessors for Register Operands
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// getReg - Returns the register number.
|
||||
unsigned getReg() const {
|
||||
assert(isReg() && "This is not a register operand!");
|
||||
return SmallContents.RegNo;
|
||||
}
|
||||
|
||||
unsigned getSubReg() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return SubReg_TargetFlags;
|
||||
}
|
||||
|
||||
bool isUse() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return !IsDef;
|
||||
}
|
||||
|
||||
bool isDef() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsDef;
|
||||
}
|
||||
|
||||
bool isImplicit() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsImp;
|
||||
}
|
||||
|
||||
bool isDead() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsDead;
|
||||
}
|
||||
|
||||
bool isKill() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsKill;
|
||||
}
|
||||
|
||||
bool isUndef() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsUndef;
|
||||
}
|
||||
|
||||
bool isInternalRead() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsInternalRead;
|
||||
}
|
||||
|
||||
bool isEarlyClobber() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsEarlyClobber;
|
||||
}
|
||||
|
||||
bool isTied() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return TiedTo;
|
||||
}
|
||||
|
||||
bool isDebug() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return IsDebug;
|
||||
}
|
||||
|
||||
/// readsReg - Returns true if this operand reads the previous value of its
|
||||
/// register. A use operand with the <undef> flag set doesn't read its
|
||||
/// register. A sub-register def implicitly reads the other parts of the
|
||||
/// register being redefined unless the <undef> flag is set.
|
||||
///
|
||||
/// This refers to reading the register value from before the current
|
||||
/// instruction or bundle. Internal bundle reads are not included.
|
||||
bool readsReg() const {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
|
||||
}
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Mutators for Register Operands
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// Change the register this operand corresponds to.
|
||||
///
|
||||
void setReg(unsigned Reg);
|
||||
|
||||
void setSubReg(unsigned subReg) {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
SubReg_TargetFlags = subReg;
|
||||
assert(SubReg_TargetFlags == subReg && "SubReg out of range");
|
||||
}
|
||||
|
||||
/// substVirtReg - Substitute the current register with the virtual
|
||||
/// subregister Reg:SubReg. Take any existing SubReg index into account,
|
||||
/// using TargetRegisterInfo to compose the subreg indices if necessary.
|
||||
/// Reg must be a virtual register, SubIdx can be 0.
|
||||
///
|
||||
void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
|
||||
|
||||
/// substPhysReg - Substitute the current register with the physical register
|
||||
/// Reg, taking any existing SubReg into account. For instance,
|
||||
/// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
|
||||
///
|
||||
void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
|
||||
|
||||
void setIsUse(bool Val = true) { setIsDef(!Val); }
|
||||
|
||||
void setIsDef(bool Val = true);
|
||||
|
||||
void setImplicit(bool Val = true) {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
IsImp = Val;
|
||||
}
|
||||
|
||||
void setIsKill(bool Val = true) {
|
||||
assert(isReg() && !IsDef && "Wrong MachineOperand accessor");
|
||||
assert((!Val || !isDebug()) && "Marking a debug operation as kill");
|
||||
IsKill = Val;
|
||||
}
|
||||
|
||||
void setIsDead(bool Val = true) {
|
||||
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
|
||||
IsDead = Val;
|
||||
}
|
||||
|
||||
void setIsUndef(bool Val = true) {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
IsUndef = Val;
|
||||
}
|
||||
|
||||
void setIsInternalRead(bool Val = true) {
|
||||
assert(isReg() && "Wrong MachineOperand accessor");
|
||||
IsInternalRead = Val;
|
||||
}
|
||||
|
||||
void setIsEarlyClobber(bool Val = true) {
|
||||
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
|
||||
IsEarlyClobber = Val;
|
||||
}
|
||||
|
||||
void setIsDebug(bool Val = true) {
|
||||
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
|
||||
IsDebug = Val;
|
||||
}
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Accessors for various operand types.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
int64_t getImm() const {
|
||||
assert(isImm() && "Wrong MachineOperand accessor");
|
||||
return Contents.ImmVal;
|
||||
}
|
||||
|
||||
const ConstantInt *getCImm() const {
|
||||
assert(isCImm() && "Wrong MachineOperand accessor");
|
||||
return Contents.CI;
|
||||
}
|
||||
|
||||
const ConstantFP *getFPImm() const {
|
||||
assert(isFPImm() && "Wrong MachineOperand accessor");
|
||||
return Contents.CFP;
|
||||
}
|
||||
|
||||
MachineBasicBlock *getMBB() const {
|
||||
assert(isMBB() && "Wrong MachineOperand accessor");
|
||||
return Contents.MBB;
|
||||
}
|
||||
|
||||
int getIndex() const {
|
||||
assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
|
||||
"Wrong MachineOperand accessor");
|
||||
return Contents.OffsetedInfo.Val.Index;
|
||||
}
|
||||
|
||||
const GlobalValue *getGlobal() const {
|
||||
assert(isGlobal() && "Wrong MachineOperand accessor");
|
||||
return Contents.OffsetedInfo.Val.GV;
|
||||
}
|
||||
|
||||
const BlockAddress *getBlockAddress() const {
|
||||
assert(isBlockAddress() && "Wrong MachineOperand accessor");
|
||||
return Contents.OffsetedInfo.Val.BA;
|
||||
}
|
||||
|
||||
MCSymbol *getMCSymbol() const {
|
||||
assert(isMCSymbol() && "Wrong MachineOperand accessor");
|
||||
return Contents.Sym;
|
||||
}
|
||||
|
||||
/// getOffset - Return the offset from the symbol in this operand. This always
|
||||
/// returns 0 for ExternalSymbol operands.
|
||||
int64_t getOffset() const {
|
||||
assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
|
||||
isBlockAddress()) && "Wrong MachineOperand accessor");
|
||||
return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
|
||||
SmallContents.OffsetLo;
|
||||
}
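// Note: the 64-bit offset is split purely for struct packing.
// SmallContents.OffsetLo holds the low 32 bits and
// Contents.OffsetedInfo.OffsetHi the high 32 bits, so an offset of
// 0x100000004 is stored as OffsetHi = 1, OffsetLo = 4 and reassembled by
// the shift-or above.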
|
||||
|
||||
const char *getSymbolName() const {
|
||||
assert(isSymbol() && "Wrong MachineOperand accessor");
|
||||
return Contents.OffsetedInfo.Val.SymbolName;
|
||||
}
|
||||
|
||||
/// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
|
||||
/// It is sometimes necessary to detach the register mask pointer from its
|
||||
/// machine operand. This static method can be used for such detached bit
|
||||
/// mask pointers.
|
||||
static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) {
|
||||
// See TargetRegisterInfo.h.
|
||||
assert(PhysReg < (1u << 30) && "Not a physical register");
|
||||
return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
|
||||
}
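// Worked example: PhysReg 37 tests RegMask[1] against (1u << 5); a clear
// bit means the register is clobbered, a set bit means it is preserved.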
|
||||
|
||||
/// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
|
||||
bool clobbersPhysReg(unsigned PhysReg) const {
|
||||
return clobbersPhysReg(getRegMask(), PhysReg);
|
||||
}
|
||||
|
||||
/// getRegMask - Returns a bit mask of registers preserved by this RegMask
|
||||
/// operand.
|
||||
const uint32_t *getRegMask() const {
|
||||
assert(isRegMask() && "Wrong MachineOperand accessor");
|
||||
return Contents.RegMask;
|
||||
}
|
||||
|
||||
const MDNode *getMetadata() const {
|
||||
assert(isMetadata() && "Wrong MachineOperand accessor");
|
||||
return Contents.MD;
|
||||
}
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Mutators for various operand types.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
void setImm(int64_t immVal) {
|
||||
assert(isImm() && "Wrong MachineOperand mutator");
|
||||
Contents.ImmVal = immVal;
|
||||
}
|
||||
|
||||
void setOffset(int64_t Offset) {
|
||||
assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
|
||||
isBlockAddress()) && "Wrong MachineOperand accessor");
|
||||
SmallContents.OffsetLo = unsigned(Offset);
|
||||
Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
|
||||
}
|
||||
|
||||
void setIndex(int Idx) {
|
||||
assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
|
||||
"Wrong MachineOperand accessor");
|
||||
Contents.OffsetedInfo.Val.Index = Idx;
|
||||
}
|
||||
|
||||
void setMBB(MachineBasicBlock *MBB) {
|
||||
assert(isMBB() && "Wrong MachineOperand accessor");
|
||||
Contents.MBB = MBB;
|
||||
}
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Other methods.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// isIdenticalTo - Return true if this operand is identical to the specified
|
||||
/// operand. Note: This method ignores isKill and isDead properties.
|
||||
bool isIdenticalTo(const MachineOperand &Other) const;
|
||||
|
||||
/// \brief MachineOperand hash_value overload.
|
||||
///
|
||||
/// Note that this includes the same information in the hash that
|
||||
/// isIdenticalTo uses for comparison. It is thus suited for use in hash
|
||||
/// tables which use that function for equality comparisons only.
|
||||
friend hash_code hash_value(const MachineOperand &MO);
|
||||
|
||||
/// ChangeToImmediate - Replace this operand with a new immediate operand of
|
||||
/// the specified value. If an operand is known to be an immediate already,
|
||||
/// the setImm method should be used.
|
||||
void ChangeToImmediate(int64_t ImmVal);
|
||||
|
||||
/// ChangeToRegister - Replace this operand with a new register operand of
|
||||
/// the specified value. If an operand is known to be a register already,
|
||||
/// the setReg method should be used.
|
||||
void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
|
||||
bool isKill = false, bool isDead = false,
|
||||
bool isUndef = false, bool isDebug = false);
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Construction methods.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
static MachineOperand CreateImm(int64_t Val) {
|
||||
MachineOperand Op(MachineOperand::MO_Immediate);
|
||||
Op.setImm(Val);
|
||||
return Op;
|
||||
}
|
||||
|
||||
static MachineOperand CreateCImm(const ConstantInt *CI) {
|
||||
MachineOperand Op(MachineOperand::MO_CImmediate);
|
||||
Op.Contents.CI = CI;
|
||||
return Op;
|
||||
}
|
||||
|
||||
static MachineOperand CreateFPImm(const ConstantFP *CFP) {
|
||||
MachineOperand Op(MachineOperand::MO_FPImmediate);
|
||||
Op.Contents.CFP = CFP;
|
||||
return Op;
|
||||
}
|
||||
|
||||
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
|
||||
bool isKill = false, bool isDead = false,
|
||||
bool isUndef = false,
|
||||
bool isEarlyClobber = false,
|
||||
unsigned SubReg = 0,
|
||||
bool isDebug = false,
|
||||
bool isInternalRead = false) {
|
||||
MachineOperand Op(MachineOperand::MO_Register);
|
||||
Op.IsDef = isDef;
|
||||
Op.IsImp = isImp;
|
||||
Op.IsKill = isKill;
|
||||
Op.IsDead = isDead;
|
||||
Op.IsUndef = isUndef;
|
||||
Op.IsInternalRead = isInternalRead;
|
||||
Op.IsEarlyClobber = isEarlyClobber;
|
||||
Op.TiedTo = 0;
|
||||
Op.IsDebug = isDebug;
|
||||
Op.SmallContents.RegNo = Reg;
|
||||
Op.Contents.Reg.Prev = 0;
|
||||
Op.Contents.Reg.Next = 0;
|
||||
Op.setSubReg(SubReg);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateMBB(MachineBasicBlock *MBB,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
|
||||
Op.setMBB(MBB);
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateFI(int Idx) {
|
||||
MachineOperand Op(MachineOperand::MO_FrameIndex);
|
||||
Op.setIndex(Idx);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateCPI(unsigned Idx, int Offset,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
|
||||
Op.setIndex(Idx);
|
||||
Op.setOffset(Offset);
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_TargetIndex);
|
||||
Op.setIndex(Idx);
|
||||
Op.setOffset(Offset);
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateJTI(unsigned Idx,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_JumpTableIndex);
|
||||
Op.setIndex(Idx);
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_GlobalAddress);
|
||||
Op.Contents.OffsetedInfo.Val.GV = GV;
|
||||
Op.setOffset(Offset);
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateES(const char *SymName,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_ExternalSymbol);
|
||||
Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
|
||||
Op.setOffset(0); // Offset is always 0.
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
|
||||
unsigned char TargetFlags = 0) {
|
||||
MachineOperand Op(MachineOperand::MO_BlockAddress);
|
||||
Op.Contents.OffsetedInfo.Val.BA = BA;
|
||||
Op.setOffset(Offset);
|
||||
Op.setTargetFlags(TargetFlags);
|
||||
return Op;
|
||||
}
|
||||
/// CreateRegMask - Creates a register mask operand referencing Mask. The
|
||||
/// operand does not take ownership of the memory referenced by Mask, it must
|
||||
/// remain valid for the lifetime of the operand.
|
||||
///
|
||||
/// A RegMask operand represents a set of non-clobbered physical registers on
|
||||
/// an instruction that clobbers many registers, typically a call. The bit
|
||||
/// mask has a bit set for each physreg that is preserved by this
|
||||
/// instruction, as described in the documentation for
|
||||
/// TargetRegisterInfo::getCallPreservedMask().
|
||||
///
|
||||
/// Any physreg with a 0 bit in the mask is clobbered by the instruction.
|
||||
///
|
||||
static MachineOperand CreateRegMask(const uint32_t *Mask) {
|
||||
assert(Mask && "Missing register mask");
|
||||
MachineOperand Op(MachineOperand::MO_RegisterMask);
|
||||
Op.Contents.RegMask = Mask;
|
||||
return Op;
|
||||
}
|
||||
static MachineOperand CreateMetadata(const MDNode *Meta) {
|
||||
MachineOperand Op(MachineOperand::MO_Metadata);
|
||||
Op.Contents.MD = Meta;
|
||||
return Op;
|
||||
}
|
||||
|
||||
static MachineOperand CreateMCSymbol(MCSymbol *Sym) {
|
||||
MachineOperand Op(MachineOperand::MO_MCSymbol);
|
||||
Op.Contents.Sym = Sym;
|
||||
return Op;
|
||||
}
|
||||
|
||||
friend class MachineInstr;
|
||||
friend class MachineRegisterInfo;
|
||||
private:
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Methods for handling register use/def lists.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// isOnRegUseList - Return true if this operand is on a register use/def list
|
||||
/// or false if not. This can only be called for register operands that are
|
||||
/// part of a machine instruction.
|
||||
bool isOnRegUseList() const {
|
||||
assert(isReg() && "Can only add reg operand to use lists");
|
||||
return Contents.Reg.Prev != 0;
|
||||
}
|
||||
};
|
||||
|
||||
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
|
||||
MO.print(OS, 0);
|
||||
return OS;
|
||||
}
|
||||
|
||||
// See friend declaration above. This additional declaration is required in
|
||||
// order to compile LLVM with IBM xlC compiler.
|
||||
hash_code hash_value(const MachineOperand &MO);
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
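// Illustrative sketch (hypothetical function and values): building operands
// with the static Create* helpers above and reading them back through the
// accessors.
#include "llvm/CodeGen/MachineOperand.h"
#include <cassert>

static void machineOperandDemo(unsigned SomeVReg) {
  using llvm::MachineOperand;

  // A register def and a plain 64-bit immediate.
  MachineOperand Def = MachineOperand::CreateReg(SomeVReg, /*isDef=*/true);
  MachineOperand Imm = MachineOperand::CreateImm(42);

  assert(Def.isReg() && Def.isDef() && !Def.isImplicit());
  assert(Imm.isImm() && Imm.getImm() == 42);

  // Free-standing operands are not embedded in any MachineInstr yet.
  assert(Def.getParent() == 0);
}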
|
||||
158
thirdparty/clang/include/llvm/CodeGen/MachinePassRegistry.h
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
//===-- llvm/CodeGen/MachinePassRegistry.h ----------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file contains the mechanics for machine function pass registries. A
|
||||
// function pass registry (MachinePassRegistry) is auto-filled by the static
// constructors of MachinePassRegistryNode. Further, there is a command-line
|
||||
// parser (RegisterPassParser) which listens to each registry for additions
|
||||
// and deletions, so that the appropriate command option is updated.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
|
||||
#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H
|
||||
|
||||
#include "llvm/CodeGen/Passes.h"
|
||||
#include "llvm/Support/CommandLine.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
typedef void *(*MachinePassCtor)();
|
||||
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// MachinePassRegistryListener - Listener for additions and removals of nodes
/// in the registration list.
|
||||
///
|
||||
//===----------------------------------------------------------------------===//
|
||||
class MachinePassRegistryListener {
|
||||
virtual void anchor();
|
||||
public:
|
||||
MachinePassRegistryListener() {}
|
||||
virtual ~MachinePassRegistryListener() {}
|
||||
virtual void NotifyAdd(const char *N, MachinePassCtor C, const char *D) = 0;
|
||||
virtual void NotifyRemove(const char *N) = 0;
|
||||
};
|
||||
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// MachinePassRegistryNode - Machine pass node stored in registration list.
|
||||
///
|
||||
//===----------------------------------------------------------------------===//
|
||||
class MachinePassRegistryNode {
|
||||
|
||||
private:
|
||||
|
||||
MachinePassRegistryNode *Next; // Next function pass in list.
|
||||
const char *Name; // Name of function pass.
|
||||
const char *Description; // Description string.
|
||||
MachinePassCtor Ctor; // Function pass creator.
|
||||
|
||||
public:
|
||||
|
||||
MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
|
||||
: Next(NULL)
|
||||
, Name(N)
|
||||
, Description(D)
|
||||
, Ctor(C)
|
||||
{}
|
||||
|
||||
// Accessors
|
||||
MachinePassRegistryNode *getNext() const { return Next; }
|
||||
MachinePassRegistryNode **getNextAddress() { return &Next; }
|
||||
const char *getName() const { return Name; }
|
||||
const char *getDescription() const { return Description; }
|
||||
MachinePassCtor getCtor() const { return Ctor; }
|
||||
void setNext(MachinePassRegistryNode *N) { Next = N; }
|
||||
|
||||
};
|
||||
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// MachinePassRegistry - Track the registration of machine passes.
|
||||
///
|
||||
//===----------------------------------------------------------------------===//
|
||||
class MachinePassRegistry {
|
||||
|
||||
private:
|
||||
|
||||
MachinePassRegistryNode *List; // List of registry nodes.
|
||||
MachinePassCtor Default; // Default function pass creator.
|
||||
MachinePassRegistryListener* Listener; // Listener for list adds and removes.
|
||||
|
||||
public:
|
||||
|
||||
// NO CONSTRUCTOR - we don't want static constructor ordering to mess
|
||||
// with the registry.
|
||||
|
||||
// Accessors.
|
||||
//
|
||||
MachinePassRegistryNode *getList() { return List; }
|
||||
MachinePassCtor getDefault() { return Default; }
|
||||
void setDefault(MachinePassCtor C) { Default = C; }
|
||||
void setDefault(StringRef Name);
|
||||
void setListener(MachinePassRegistryListener *L) { Listener = L; }
|
||||
|
||||
/// Add - Adds a function pass to the registration list.
|
||||
///
|
||||
void Add(MachinePassRegistryNode *Node);
|
||||
|
||||
/// Remove - Removes a function pass from the registration list.
|
||||
///
|
||||
void Remove(MachinePassRegistryNode *Node);
|
||||
|
||||
};
|
||||
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// RegisterPassParser class - Handle the addition of new machine passes.
|
||||
///
|
||||
//===----------------------------------------------------------------------===//
|
||||
template<class RegistryClass>
|
||||
class RegisterPassParser : public MachinePassRegistryListener,
|
||||
public cl::parser<typename RegistryClass::FunctionPassCtor> {
|
||||
public:
|
||||
RegisterPassParser() {}
|
||||
~RegisterPassParser() { RegistryClass::setListener(NULL); }
|
||||
|
||||
void initialize(cl::Option &O) {
|
||||
cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O);
|
||||
|
||||
// Add existing passes to option.
|
||||
for (RegistryClass *Node = RegistryClass::getList();
|
||||
Node; Node = Node->getNext()) {
|
||||
this->addLiteralOption(Node->getName(),
|
||||
(typename RegistryClass::FunctionPassCtor)Node->getCtor(),
|
||||
Node->getDescription());
|
||||
}
|
||||
|
||||
// Make sure we listen for list changes.
|
||||
RegistryClass::setListener(this);
|
||||
}
|
||||
|
||||
// Implement the MachinePassRegistryListener callbacks.
|
||||
//
|
||||
virtual void NotifyAdd(const char *N,
|
||||
MachinePassCtor C,
|
||||
const char *D) {
|
||||
this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
|
||||
}
|
||||
virtual void NotifyRemove(const char *N) {
|
||||
this->removeLiteralOption(N);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
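// Illustrative sketch (class name and messages are invented): a listener
// implementing the interface above that simply logs registry changes. A
// registry would attach it with setListener().
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class LoggingPassListener : public llvm::MachinePassRegistryListener {
public:
  virtual void NotifyAdd(const char *N, llvm::MachinePassCtor C,
                         const char *D) {
    (void)C;
    llvm::errs() << "machine pass registered: " << N << " - " << D << "\n";
  }
  virtual void NotifyRemove(const char *N) {
    llvm::errs() << "machine pass removed: " << N << "\n";
  }
};
} // end anonymous namespace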
|
||||
87
thirdparty/clang/include/llvm/CodeGen/MachinePostDominators.h
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
//=- llvm/CodeGen/MachinePostDominators.h ------------------------*- C++ -*-==//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file exposes interfaces to post dominance information for
|
||||
// target-specific code.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
|
||||
#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
|
||||
|
||||
#include "llvm/Analysis/Dominators.h"
|
||||
#include "llvm/CodeGen/MachineDominators.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
///
|
||||
/// MachinePostDominatorTree Class - Concrete subclass of DominatorTree that is
/// used to compute the post-dominator tree.
|
||||
///
|
||||
struct MachinePostDominatorTree : public MachineFunctionPass {
|
||||
private:
|
||||
DominatorTreeBase<MachineBasicBlock> *DT;
|
||||
|
||||
public:
|
||||
static char ID;
|
||||
|
||||
MachinePostDominatorTree();
|
||||
|
||||
~MachinePostDominatorTree();
|
||||
|
||||
FunctionPass *createMachinePostDominatorTreePass();
|
||||
|
||||
const std::vector<MachineBasicBlock *> &getRoots() const {
|
||||
return DT->getRoots();
|
||||
}
|
||||
|
||||
MachineDomTreeNode *getRootNode() const {
|
||||
return DT->getRootNode();
|
||||
}
|
||||
|
||||
MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
|
||||
return DT->getNode(BB);
|
||||
}
|
||||
|
||||
MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
|
||||
return DT->getNode(BB);
|
||||
}
|
||||
|
||||
bool dominates(const MachineDomTreeNode *A,
|
||||
const MachineDomTreeNode *B) const {
|
||||
return DT->dominates(A, B);
|
||||
}
|
||||
|
||||
bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
|
||||
return DT->dominates(A, B);
|
||||
}
|
||||
|
||||
bool properlyDominates(const MachineDomTreeNode *A,
|
||||
const MachineDomTreeNode *B) const {
|
||||
return DT->properlyDominates(A, B);
|
||||
}
|
||||
|
||||
bool properlyDominates(const MachineBasicBlock *A,
|
||||
const MachineBasicBlock *B) const {
|
||||
return DT->properlyDominates(A, B);
|
||||
}
|
||||
|
||||
MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
|
||||
MachineBasicBlock *B) {
|
||||
return DT->findNearestCommonDominator(A, B);
|
||||
}
|
||||
|
||||
virtual bool runOnMachineFunction(MachineFunction &MF);
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
virtual void print(llvm::raw_ostream &OS, const Module *M = 0) const;
|
||||
};
|
||||
} //end of namespace llvm
|
||||
|
||||
#endif
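// Illustrative sketch (the pass itself is invented): consuming the analysis
// from another MachineFunctionPass through the usual getAnalysis<> mechanism.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachinePostDominators.h"

namespace {
struct PostDomQuery : public llvm::MachineFunctionPass {
  static char ID;
  PostDomQuery() : llvm::MachineFunctionPass(ID) {}

  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    AU.addRequired<llvm::MachinePostDominatorTree>();
    AU.setPreservesAll();
    llvm::MachineFunctionPass::getAnalysisUsage(AU);
  }

  virtual bool runOnMachineFunction(llvm::MachineFunction &MF) {
    llvm::MachinePostDominatorTree &PDT =
        getAnalysis<llvm::MachinePostDominatorTree>();
    // Example query: look up the tree node for the entry block.
    (void)PDT.getNode(&MF.front());
    return false; // analysis only, nothing changed
  }
};
char PostDomQuery::ID = 0;
} // end anonymous namespace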
|
||||
619
thirdparty/clang/include/llvm/CodeGen/MachineRegisterInfo.h
vendored
Normal file
@@ -0,0 +1,619 @@
|
||||
//===-- llvm/CodeGen/MachineRegisterInfo.h ----------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the MachineRegisterInfo class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
|
||||
#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
|
||||
|
||||
#include "llvm/ADT/BitVector.h"
|
||||
#include "llvm/ADT/IndexedMap.h"
|
||||
#include "llvm/CodeGen/MachineInstrBundle.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// MachineRegisterInfo - Keep track of information for virtual and physical
|
||||
/// registers, including vreg register classes, use/def chains for registers,
|
||||
/// etc.
|
||||
class MachineRegisterInfo {
|
||||
const TargetRegisterInfo *const TRI;
|
||||
|
||||
/// IsSSA - True when the machine function is in SSA form and virtual
|
||||
/// registers have a single def.
|
||||
bool IsSSA;
|
||||
|
||||
/// TracksLiveness - True while register liveness is being tracked accurately.
|
||||
/// Basic block live-in lists, kill flags, and implicit defs may not be
|
||||
/// accurate after this flag is cleared.
|
||||
bool TracksLiveness;
|
||||
|
||||
/// VRegInfo - Information we keep for each virtual register.
|
||||
///
|
||||
/// Each element in this list contains the register class of the vreg and the
|
||||
/// start of the use/def list for the register.
|
||||
IndexedMap<std::pair<const TargetRegisterClass*, MachineOperand*>,
|
||||
VirtReg2IndexFunctor> VRegInfo;
|
||||
|
||||
/// RegAllocHints - This vector records register allocation hints for virtual
|
||||
/// registers. For each virtual register, it keeps a register and hint type
|
||||
/// pair making up the allocation hint. Hint type is target specific except
|
||||
/// for the value 0 which means the second value of the pair is the preferred
|
||||
/// register for allocation. For example, if the hint is <0, 1024>, it means
|
||||
/// the allocator should prefer the physical register allocated to the virtual
|
||||
/// register of the hint.
|
||||
IndexedMap<std::pair<unsigned, unsigned>, VirtReg2IndexFunctor> RegAllocHints;
|
||||
|
||||
/// PhysRegUseDefLists - This is an array of the head of the use/def list for
|
||||
/// physical registers.
|
||||
MachineOperand **PhysRegUseDefLists;
|
||||
|
||||
/// getRegUseDefListHead - Return the head pointer for the register use/def
|
||||
/// list for the specified virtual or physical register.
|
||||
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
|
||||
if (TargetRegisterInfo::isVirtualRegister(RegNo))
|
||||
return VRegInfo[RegNo].second;
|
||||
return PhysRegUseDefLists[RegNo];
|
||||
}
|
||||
|
||||
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
|
||||
if (TargetRegisterInfo::isVirtualRegister(RegNo))
|
||||
return VRegInfo[RegNo].second;
|
||||
return PhysRegUseDefLists[RegNo];
|
||||
}
|
||||
|
||||
/// Get the next element in the use-def chain.
|
||||
static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
|
||||
assert(MO && MO->isReg() && "This is not a register operand!");
|
||||
return MO->Contents.Reg.Next;
|
||||
}
|
||||
|
||||
/// UsedRegUnits - This is a bit vector that is computed and set by the
|
||||
/// register allocator, and must be kept up to date by passes that run after
|
||||
/// register allocation (though most don't modify this). This is used
|
||||
/// so that the code generator knows which callee save registers to save and
|
||||
/// for other target specific uses.
|
||||
/// This vector has bits set for register units that are modified in the
|
||||
/// current function. It doesn't include registers clobbered by function
|
||||
/// calls with register mask operands.
|
||||
BitVector UsedRegUnits;
|
||||
|
||||
/// UsedPhysRegMask - Additional used physregs including aliases.
|
||||
/// This bit vector represents all the registers clobbered by function calls.
|
||||
/// It can model things that UsedRegUnits can't, such as function calls that
|
||||
/// clobber ymm7 but preserve the low half in xmm7.
|
||||
BitVector UsedPhysRegMask;
|
||||
|
||||
/// ReservedRegs - This is a bit vector of reserved registers. The target
|
||||
/// may change its mind about which registers should be reserved. This
|
||||
/// vector is the frozen set of reserved registers when register allocation
|
||||
/// started.
|
||||
BitVector ReservedRegs;
|
||||
|
||||
/// Keep track of the physical registers that are live in to the function.
|
||||
/// Live in values are typically arguments in registers. LiveIn values are
|
||||
/// allowed to have virtual registers associated with them, stored in the
|
||||
/// second element.
|
||||
std::vector<std::pair<unsigned, unsigned> > LiveIns;
|
||||
|
||||
MachineRegisterInfo(const MachineRegisterInfo&) LLVM_DELETED_FUNCTION;
|
||||
void operator=(const MachineRegisterInfo&) LLVM_DELETED_FUNCTION;
|
||||
public:
|
||||
explicit MachineRegisterInfo(const TargetRegisterInfo &TRI);
|
||||
~MachineRegisterInfo();
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Function State
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
// isSSA - Returns true when the machine function is in SSA form. Early
|
||||
// passes require the machine function to be in SSA form where every virtual
|
||||
// register has a single defining instruction.
|
||||
//
|
||||
// The TwoAddressInstructionPass and PHIElimination passes take the machine
|
||||
// function out of SSA form when they introduce multiple defs per virtual
|
||||
// register.
|
||||
bool isSSA() const { return IsSSA; }
|
||||
|
||||
// leaveSSA - Indicates that the machine function is no longer in SSA form.
|
||||
void leaveSSA() { IsSSA = false; }
|
||||
|
||||
/// tracksLiveness - Returns true when tracking register liveness accurately.
|
||||
///
|
||||
/// While this flag is true, register liveness information in basic block
|
||||
/// live-in lists and machine instruction operands is accurate. This means it
|
||||
/// can be used to change the code in ways that affect the values in
|
||||
/// registers, for example by the register scavenger.
|
||||
///
|
||||
/// When this flag is false, liveness is no longer reliable.
|
||||
bool tracksLiveness() const { return TracksLiveness; }
|
||||
|
||||
/// invalidateLiveness - Indicates that register liveness is no longer being
|
||||
/// tracked accurately.
|
||||
///
|
||||
/// This should be called by late passes that invalidate the liveness
|
||||
/// information.
|
||||
void invalidateLiveness() { TracksLiveness = false; }
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Register Info
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
// Strictly for use by MachineInstr.cpp.
|
||||
void addRegOperandToUseList(MachineOperand *MO);
|
||||
|
||||
// Strictly for use by MachineInstr.cpp.
|
||||
void removeRegOperandFromUseList(MachineOperand *MO);
|
||||
|
||||
// Strictly for use by MachineInstr.cpp.
|
||||
void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);
|
||||
|
||||
/// Verify the sanity of the use list for Reg.
|
||||
void verifyUseList(unsigned Reg) const;
|
||||
|
||||
/// Verify the use list of all registers.
|
||||
void verifyUseLists() const;
|
||||
|
||||
/// reg_begin/reg_end - Provide iteration support to walk over all definitions
|
||||
/// and uses of a register within the MachineFunction that corresponds to this
|
||||
/// MachineRegisterInfo object.
|
||||
template<bool Uses, bool Defs, bool SkipDebug>
|
||||
class defusechain_iterator;
|
||||
|
||||
// Make it a friend so it can access getNextOperandForReg().
|
||||
template<bool, bool, bool> friend class defusechain_iterator;
|
||||
|
||||
/// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
|
||||
/// register.
|
||||
typedef defusechain_iterator<true,true,false> reg_iterator;
|
||||
reg_iterator reg_begin(unsigned RegNo) const {
|
||||
return reg_iterator(getRegUseDefListHead(RegNo));
|
||||
}
|
||||
static reg_iterator reg_end() { return reg_iterator(0); }
|
||||
|
||||
/// reg_empty - Return true if there are no instructions using or defining the
|
||||
/// specified register (it may be live-in).
|
||||
bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
|
||||
|
||||
/// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
|
||||
/// of the specified register, skipping those marked as Debug.
|
||||
typedef defusechain_iterator<true,true,true> reg_nodbg_iterator;
|
||||
reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
|
||||
return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||
}
|
||||
static reg_nodbg_iterator reg_nodbg_end() { return reg_nodbg_iterator(0); }
|
||||
|
||||
/// reg_nodbg_empty - Return true if the only instructions using or defining
|
||||
/// Reg are Debug instructions.
|
||||
bool reg_nodbg_empty(unsigned RegNo) const {
|
||||
return reg_nodbg_begin(RegNo) == reg_nodbg_end();
|
||||
}
|
||||
|
||||
/// def_iterator/def_begin/def_end - Walk all defs of the specified register.
|
||||
typedef defusechain_iterator<false,true,false> def_iterator;
|
||||
def_iterator def_begin(unsigned RegNo) const {
|
||||
return def_iterator(getRegUseDefListHead(RegNo));
|
||||
}
|
||||
static def_iterator def_end() { return def_iterator(0); }
|
||||
|
||||
/// def_empty - Return true if there are no instructions defining the
|
||||
/// specified register (it may be live-in).
|
||||
bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
|
||||
|
||||
/// hasOneDef - Return true if there is exactly one instruction defining the
|
||||
/// specified register.
|
||||
bool hasOneDef(unsigned RegNo) const {
|
||||
def_iterator DI = def_begin(RegNo);
|
||||
if (DI == def_end())
|
||||
return false;
|
||||
return ++DI == def_end();
|
||||
}
|
||||
|
||||
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
|
||||
typedef defusechain_iterator<true,false,false> use_iterator;
|
||||
use_iterator use_begin(unsigned RegNo) const {
|
||||
return use_iterator(getRegUseDefListHead(RegNo));
|
||||
}
|
||||
static use_iterator use_end() { return use_iterator(0); }
|
||||
|
||||
/// use_empty - Return true if there are no instructions using the specified
|
||||
/// register.
|
||||
bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
|
||||
|
||||
/// hasOneUse - Return true if there is exactly one instruction using the
|
||||
/// specified register.
|
||||
bool hasOneUse(unsigned RegNo) const {
|
||||
use_iterator UI = use_begin(RegNo);
|
||||
if (UI == use_end())
|
||||
return false;
|
||||
return ++UI == use_end();
|
||||
}
|
||||
|
||||
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
|
||||
/// specified register, skipping those marked as Debug.
|
||||
typedef defusechain_iterator<true,false,true> use_nodbg_iterator;
|
||||
use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
|
||||
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||
}
|
||||
static use_nodbg_iterator use_nodbg_end() { return use_nodbg_iterator(0); }
|
||||
|
||||
/// use_nodbg_empty - Return true if there are no non-Debug instructions
|
||||
/// using the specified register.
|
||||
bool use_nodbg_empty(unsigned RegNo) const {
|
||||
return use_nodbg_begin(RegNo) == use_nodbg_end();
|
||||
}
|
||||
|
||||
/// hasOneNonDBGUse - Return true if there is exactly one non-Debug
|
||||
/// instruction using the specified register.
|
||||
bool hasOneNonDBGUse(unsigned RegNo) const;
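// Editor's note: an illustrative sketch (not part of the original header) of
// walking the non-debug uses of a register with the use_nodbg_iterator
// declared above. 'MRI' and 'Reg' are assumed to exist in the caller;
// operator* yields the using MachineInstr and getOperand() the use operand:
//
//   for (MachineRegisterInfo::use_nodbg_iterator
//          I = MRI.use_nodbg_begin(Reg), E = MRI.use_nodbg_end(); I != E; ++I) {
//     MachineInstr &UseMI = *I;            // instruction containing the use
//     MachineOperand &MO = I.getOperand(); // inspect or rewrite this operand
//   }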
|
||||
|
||||
/// replaceRegWith - Replace all instances of FromReg with ToReg in the
|
||||
/// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
|
||||
/// except that it also changes any definitions of the register as well.
|
||||
///
|
||||
/// Note that it is usually necessary to first constrain ToReg's register
|
||||
/// class to match the FromReg constraints using:
|
||||
///
|
||||
/// constrainRegClass(ToReg, getRegClass(FromReg))
|
||||
///
|
||||
/// That function will return NULL if the virtual registers have incompatible
|
||||
/// constraints.
|
||||
void replaceRegWith(unsigned FromReg, unsigned ToReg);
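// Editor's note: a minimal sketch (not part of the original header) of the
// constrain-then-replace pattern described above; MRI, FromReg and ToReg are
// assumed to be a MachineRegisterInfo and two existing virtual registers:
//
//   if (MRI.constrainRegClass(ToReg, MRI.getRegClass(FromReg)))
//     MRI.replaceRegWith(FromReg, ToReg); // every def and use now refers to ToReg
//   // else: the two registers have incompatible constraints; keep both.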
|
||||
|
||||
/// getVRegDef - Return the machine instr that defines the specified virtual
|
||||
/// register or null if none is found. This assumes that the code is in SSA
|
||||
/// form, so there should only be one definition.
|
||||
MachineInstr *getVRegDef(unsigned Reg) const;
|
||||
|
||||
/// getUniqueVRegDef - Return the unique machine instr that defines the
|
||||
/// specified virtual register or null if none is found. If there are
|
||||
/// multiple definitions or no definition, return null.
|
||||
MachineInstr *getUniqueVRegDef(unsigned Reg) const;
|
||||
|
||||
/// clearKillFlags - Iterate over all the uses of the given register and
|
||||
/// clear the kill flag from the MachineOperand. This function is used by
|
||||
/// optimization passes which extend register lifetimes and need only
|
||||
/// preserve conservative kill flag information.
|
||||
void clearKillFlags(unsigned Reg) const;
|
||||
|
||||
#ifndef NDEBUG
|
||||
void dumpUses(unsigned RegNo) const;
|
||||
#endif
|
||||
|
||||
/// isConstantPhysReg - Returns true if PhysReg is unallocatable and constant
|
||||
/// throughout the function. It is safe to move instructions that read such
|
||||
/// a physreg.
|
||||
bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Virtual Register Info
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// getRegClass - Return the register class of the specified virtual register.
|
||||
///
|
||||
const TargetRegisterClass *getRegClass(unsigned Reg) const {
|
||||
return VRegInfo[Reg].first;
|
||||
}
|
||||
|
||||
/// setRegClass - Set the register class of the specified virtual register.
|
||||
///
|
||||
void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
|
||||
|
||||
/// constrainRegClass - Constrain the register class of the specified virtual
|
||||
/// register to be a common subclass of RC and the current register class,
|
||||
/// but only if the new class has at least MinNumRegs registers. Return the
|
||||
/// new register class, or NULL if no such class exists.
|
||||
/// This should only be used when the constraint is known to be trivial, like
|
||||
/// GR32 -> GR32_NOSP. Beware of increasing register pressure.
|
||||
///
|
||||
const TargetRegisterClass *constrainRegClass(unsigned Reg,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned MinNumRegs = 0);
|
||||
|
||||
/// recomputeRegClass - Try to find a legal super-class of Reg's register
|
||||
/// class that still satisfies the constraints from the instructions using
|
||||
/// Reg. Returns true if Reg was upgraded.
|
||||
///
|
||||
/// This method can be used after constraints have been removed from a
|
||||
/// virtual register, for example after removing instructions or splitting
|
||||
/// the live range.
|
||||
///
|
||||
bool recomputeRegClass(unsigned Reg, const TargetMachine&);
|
||||
|
||||
/// createVirtualRegister - Create and return a new virtual register in the
|
||||
/// function with the specified register class.
|
||||
///
|
||||
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
|
||||
|
||||
/// getNumVirtRegs - Return the number of virtual registers created.
|
||||
///
|
||||
unsigned getNumVirtRegs() const { return VRegInfo.size(); }
|
||||
|
||||
/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
|
||||
void clearVirtRegs();
|
||||
|
||||
/// setRegAllocationHint - Specify a register allocation hint for the
|
||||
/// specified virtual register.
|
||||
void setRegAllocationHint(unsigned Reg, unsigned Type, unsigned PrefReg) {
|
||||
RegAllocHints[Reg].first = Type;
|
||||
RegAllocHints[Reg].second = PrefReg;
|
||||
}
|
||||
|
||||
/// getRegAllocationHint - Return the register allocation hint for the
|
||||
/// specified virtual register.
|
||||
std::pair<unsigned, unsigned>
|
||||
getRegAllocationHint(unsigned Reg) const {
|
||||
return RegAllocHints[Reg];
|
||||
}
|
||||
|
||||
/// getSimpleHint - Return the preferred register allocation hint, or 0 if a
|
||||
/// standard simple hint (Type == 0) is not set.
|
||||
unsigned getSimpleHint(unsigned Reg) const {
|
||||
std::pair<unsigned, unsigned> Hint = getRegAllocationHint(Reg);
|
||||
return Hint.first ? 0 : Hint.second;
|
||||
}
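// Editor's note: a small illustrative sketch (not part of the original
// header). A "simple" hint uses Type == 0; VReg and PhysReg are an assumed
// virtual register and preferred physical register from the caller:
//
//   MRI.setRegAllocationHint(VReg, /*Type=*/0, PhysReg);
//   unsigned Hint = MRI.getSimpleHint(VReg);  // == PhysReg for a simple hint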
|
||||
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Physical Register Use Info
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// isPhysRegUsed - Return true if the specified register is used in this
|
||||
/// function. Also check for clobbered aliases and registers clobbered by
|
||||
/// function calls with register mask operands.
|
||||
///
|
||||
/// This only works after register allocation. It is primarily used by
|
||||
/// PrologEpilogInserter to determine which callee-saved registers need
|
||||
/// spilling.
|
||||
bool isPhysRegUsed(unsigned Reg) const {
|
||||
if (UsedPhysRegMask.test(Reg))
|
||||
return true;
|
||||
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
|
||||
if (UsedRegUnits.test(*Units))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Mark the specified register unit as used in this function.
|
||||
/// This should only be called during and after register allocation.
|
||||
void setRegUnitUsed(unsigned RegUnit) {
|
||||
UsedRegUnits.set(RegUnit);
|
||||
}
|
||||
|
||||
/// setPhysRegUsed - Mark the specified register used in this function.
|
||||
/// This should only be called during and after register allocation.
|
||||
void setPhysRegUsed(unsigned Reg) {
|
||||
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
|
||||
UsedRegUnits.set(*Units);
|
||||
}
|
||||
|
||||
/// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
|
||||
/// This corresponds to the bit mask attached to register mask operands.
|
||||
void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
|
||||
UsedPhysRegMask.setBitsNotInMask(RegMask);
|
||||
}
|
||||
|
||||
/// setPhysRegUnused - Mark the specified register unused in this function.
|
||||
/// This should only be called during and after register allocation.
|
||||
void setPhysRegUnused(unsigned Reg) {
|
||||
UsedPhysRegMask.reset(Reg);
|
||||
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
|
||||
UsedRegUnits.reset(*Units);
|
||||
}
|
||||
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Reserved Register Info
|
||||
//===--------------------------------------------------------------------===//
|
||||
//
|
||||
// The set of reserved registers must be invariant during register
|
||||
// allocation. For example, the target cannot suddenly decide it needs a
|
||||
// frame pointer when the register allocator has already used the frame
|
||||
// pointer register for something else.
|
||||
//
|
||||
// These methods can be used by target hooks like hasFP() to avoid changing
|
||||
// the reserved register set during register allocation.
|
||||
|
||||
/// freezeReservedRegs - Called by the register allocator to freeze the set
|
||||
/// of reserved registers before allocation begins.
|
||||
void freezeReservedRegs(const MachineFunction&);
|
||||
|
||||
/// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
|
||||
/// to ensure the set of reserved registers stays constant.
|
||||
bool reservedRegsFrozen() const {
|
||||
return !ReservedRegs.empty();
|
||||
}
|
||||
|
||||
/// canReserveReg - Returns true if PhysReg can be used as a reserved
|
||||
/// register. Any register can be reserved before freezeReservedRegs() is
|
||||
/// called.
|
||||
bool canReserveReg(unsigned PhysReg) const {
|
||||
return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
|
||||
}
|
||||
|
||||
/// getReservedRegs - Returns a reference to the frozen set of reserved
|
||||
/// registers. This method should always be preferred to calling
|
||||
/// TRI::getReservedRegs() when possible.
|
||||
const BitVector &getReservedRegs() const {
|
||||
assert(reservedRegsFrozen() &&
|
||||
"Reserved registers haven't been frozen yet. "
|
||||
"Use TRI::getReservedRegs().");
|
||||
return ReservedRegs;
|
||||
}
|
||||
|
||||
/// isReserved - Returns true when PhysReg is a reserved register.
|
||||
///
|
||||
/// Reserved registers may belong to an allocatable register class, but the
|
||||
/// target has explicitly requested that they are not used.
|
||||
///
|
||||
bool isReserved(unsigned PhysReg) const {
|
||||
return getReservedRegs().test(PhysReg);
|
||||
}
|
||||
|
||||
/// isAllocatable - Returns true when PhysReg belongs to an allocatable
|
||||
/// register class and it hasn't been reserved.
|
||||
///
|
||||
/// Allocatable registers may show up in the allocation order of some virtual
|
||||
/// register, so a register allocator needs to track its liveness and
|
||||
/// availability.
|
||||
bool isAllocatable(unsigned PhysReg) const {
|
||||
return TRI->isInAllocatableClass(PhysReg) && !isReserved(PhysReg);
|
||||
}
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// LiveIn Management
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// addLiveIn - Add the specified register as a live-in. Note that it
|
||||
/// is an error to add the same register to the same set more than once.
|
||||
void addLiveIn(unsigned Reg, unsigned vreg = 0) {
|
||||
LiveIns.push_back(std::make_pair(Reg, vreg));
|
||||
}
|
||||
|
||||
// Iteration support for the live-ins set. It's kept in sorted order
|
||||
// by register number.
|
||||
typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator
|
||||
livein_iterator;
|
||||
livein_iterator livein_begin() const { return LiveIns.begin(); }
|
||||
livein_iterator livein_end() const { return LiveIns.end(); }
|
||||
bool livein_empty() const { return LiveIns.empty(); }
|
||||
|
||||
bool isLiveIn(unsigned Reg) const;
|
||||
|
||||
/// getLiveInPhysReg - If VReg is a live-in virtual register, return the
|
||||
/// corresponding live-in physical register.
|
||||
unsigned getLiveInPhysReg(unsigned VReg) const;
|
||||
|
||||
/// getLiveInVirtReg - If PReg is a live-in physical register, return the
/// corresponding live-in virtual register.
unsigned getLiveInVirtReg(unsigned PReg) const;
|
||||
|
||||
/// EmitLiveInCopies - Emit copies to initialize livein virtual registers
|
||||
/// into the given entry block.
|
||||
void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
|
||||
const TargetRegisterInfo &TRI,
|
||||
const TargetInstrInfo &TII);
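// Editor's note: an illustrative sketch (not part of the original header) of
// wiring up a live-in argument; RC, PhysArgReg, MF, TRI and TII are assumed
// to come from the surrounding argument-lowering code:
//
//   unsigned VReg = MRI.createVirtualRegister(RC);
//   MRI.addLiveIn(PhysArgReg, VReg);
//   MRI.EmitLiveInCopies(&MF.front(), TRI, TII); // copy PhysArgReg into VReg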
|
||||
|
||||
/// defusechain_iterator - This class provides iterator support for machine
|
||||
/// operands in the function that use or define a specific register. If
|
||||
/// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
|
||||
/// returns defs. If neither are true then you are silly and it always
|
||||
/// returns end(). If SkipDebug is true it skips uses marked Debug
|
||||
/// when incrementing.
|
||||
template<bool ReturnUses, bool ReturnDefs, bool SkipDebug>
|
||||
class defusechain_iterator
|
||||
: public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
|
||||
MachineOperand *Op;
|
||||
explicit defusechain_iterator(MachineOperand *op) : Op(op) {
|
||||
// If the first node isn't one we're interested in, advance to one that
|
||||
// we are interested in.
|
||||
if (op) {
|
||||
if ((!ReturnUses && op->isUse()) ||
|
||||
(!ReturnDefs && op->isDef()) ||
|
||||
(SkipDebug && op->isDebug()))
|
||||
++*this;
|
||||
}
|
||||
}
|
||||
friend class MachineRegisterInfo;
|
||||
public:
|
||||
typedef std::iterator<std::forward_iterator_tag,
|
||||
MachineInstr, ptrdiff_t>::reference reference;
|
||||
typedef std::iterator<std::forward_iterator_tag,
|
||||
MachineInstr, ptrdiff_t>::pointer pointer;
|
||||
|
||||
defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {}
|
||||
defusechain_iterator() : Op(0) {}
|
||||
|
||||
bool operator==(const defusechain_iterator &x) const {
|
||||
return Op == x.Op;
|
||||
}
|
||||
bool operator!=(const defusechain_iterator &x) const {
|
||||
return !operator==(x);
|
||||
}
|
||||
|
||||
/// atEnd - return true if this iterator is equal to reg_end() on the value.
|
||||
bool atEnd() const { return Op == 0; }
|
||||
|
||||
// Iterator traversal: forward iteration only
|
||||
defusechain_iterator &operator++() { // Preincrement
|
||||
assert(Op && "Cannot increment end iterator!");
|
||||
Op = getNextOperandForReg(Op);
|
||||
|
||||
// All defs come before the uses, so stop def_iterator early.
|
||||
if (!ReturnUses) {
|
||||
if (Op) {
|
||||
if (Op->isUse())
|
||||
Op = 0;
|
||||
else
|
||||
assert(!Op->isDebug() && "Can't have debug defs");
|
||||
}
|
||||
} else {
|
||||
// If this is an operand we don't care about, skip it.
|
||||
while (Op && ((!ReturnDefs && Op->isDef()) ||
|
||||
(SkipDebug && Op->isDebug())))
|
||||
Op = getNextOperandForReg(Op);
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
defusechain_iterator operator++(int) { // Postincrement
|
||||
defusechain_iterator tmp = *this; ++*this; return tmp;
|
||||
}
|
||||
|
||||
/// skipInstruction - move forward until reaching a different instruction.
|
||||
/// Return the skipped instruction that is no longer pointed to, or NULL if
|
||||
/// already pointing to end().
|
||||
MachineInstr *skipInstruction() {
|
||||
if (!Op) return 0;
|
||||
MachineInstr *MI = Op->getParent();
|
||||
do ++*this;
|
||||
while (Op && Op->getParent() == MI);
|
||||
return MI;
|
||||
}
|
||||
|
||||
MachineInstr *skipBundle() {
|
||||
if (!Op) return 0;
|
||||
MachineInstr *MI = getBundleStart(Op->getParent());
|
||||
do ++*this;
|
||||
while (Op && getBundleStart(Op->getParent()) == MI);
|
||||
return MI;
|
||||
}
|
||||
|
||||
MachineOperand &getOperand() const {
|
||||
assert(Op && "Cannot dereference end iterator!");
|
||||
return *Op;
|
||||
}
|
||||
|
||||
/// getOperandNo - Return the operand # of this MachineOperand in its
|
||||
/// MachineInstr.
|
||||
unsigned getOperandNo() const {
|
||||
assert(Op && "Cannot dereference end iterator!");
|
||||
return Op - &Op->getParent()->getOperand(0);
|
||||
}
|
||||
|
||||
// Retrieve a reference to the current operand.
|
||||
MachineInstr &operator*() const {
|
||||
assert(Op && "Cannot dereference end iterator!");
|
||||
return *Op->getParent();
|
||||
}
|
||||
|
||||
MachineInstr *operator->() const {
|
||||
assert(Op && "Cannot dereference end iterator!");
|
||||
return Op->getParent();
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
342
thirdparty/clang/include/llvm/CodeGen/MachineRelocation.h
vendored
Normal file
@@ -0,0 +1,342 @@
|
||||
//===-- llvm/CodeGen/MachineRelocation.h - Target Relocation ----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the MachineRelocation class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINERELOCATION_H
|
||||
#define LLVM_CODEGEN_MACHINERELOCATION_H
|
||||
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include <cassert>
|
||||
|
||||
namespace llvm {
|
||||
class GlobalValue;
|
||||
class MachineBasicBlock;
|
||||
|
||||
/// MachineRelocation - This represents a target-specific relocation value,
/// produced by the code emitter. This relocation is resolved after the code
/// has been emitted, either to an object file or to memory, when the target
/// of the relocation can be resolved.
|
||||
///
|
||||
/// A relocation is made up of the following logical portions:
|
||||
/// 1. An offset in the machine code buffer, the location to modify.
|
||||
/// 2. A target specific relocation type (a number from 0 to 63).
|
||||
/// 3. A symbol being referenced, either as a GlobalValue* or as a string.
|
||||
/// 4. An optional constant value to be added to the reference.
|
||||
/// 5. A bit, CanRewrite, which indicates to the JIT that a function stub is
|
||||
/// not needed for the relocation.
|
||||
/// 6. An index into the GOT, if the target uses a GOT
|
||||
///
|
||||
class MachineRelocation {
|
||||
enum AddressType {
|
||||
isResult, // Relocation has been transformed into its result pointer.
|
||||
isGV, // The Target.GV field is valid.
|
||||
isIndirectSym, // Relocation of an indirect symbol.
|
||||
isBB, // Relocation of BB address.
|
||||
isExtSym, // The Target.ExtSym field is valid.
|
||||
isConstPool, // Relocation of constant pool address.
|
||||
isJumpTable, // Relocation of jump table address.
|
||||
isGOTIndex // The Target.GOTIndex field is valid.
|
||||
};
|
||||
|
||||
/// Offset - This is the offset from the start of the code buffer of the
|
||||
/// relocation to perform.
|
||||
uintptr_t Offset;
|
||||
|
||||
/// ConstantVal - A field that may be used by the target relocation type.
|
||||
intptr_t ConstantVal;
|
||||
|
||||
union {
|
||||
void *Result; // If this has been resolved to a resolved pointer
|
||||
GlobalValue *GV; // If this is a pointer to a GV or an indirect ref.
|
||||
MachineBasicBlock *MBB; // If this is a pointer to a LLVM BB
|
||||
const char *ExtSym; // If this is a pointer to a named symbol
|
||||
unsigned Index; // Constant pool / jump table index
|
||||
unsigned GOTIndex; // Index in the GOT of this symbol/global
|
||||
} Target;
|
||||
|
||||
unsigned TargetReloType : 6; // The target relocation ID
|
||||
AddressType AddrType : 4; // The field of Target to use
|
||||
bool MayNeedFarStub : 1; // True if this relocation may require a far-stub
|
||||
bool GOTRelative : 1; // Should this relocation be relative to the GOT?
|
||||
bool TargetResolve : 1; // True if target should resolve the address
|
||||
|
||||
public:
|
||||
// Relocation types used in a generic implementation. Currently, relocation
|
||||
// entries for all things use the generic VANILLA type until they are refined
|
||||
// into target relocation types.
|
||||
enum RelocationType {
|
||||
VANILLA
|
||||
};
|
||||
|
||||
/// MachineRelocation::getGV - Return a relocation entry for a GlobalValue.
|
||||
///
|
||||
static MachineRelocation getGV(uintptr_t offset, unsigned RelocationType,
|
||||
GlobalValue *GV, intptr_t cst = 0,
|
||||
bool MayNeedFarStub = 0,
|
||||
bool GOTrelative = 0) {
|
||||
assert((RelocationType & ~63) == 0 && "Relocation type too large!");
|
||||
MachineRelocation Result;
|
||||
Result.Offset = offset;
|
||||
Result.ConstantVal = cst;
|
||||
Result.TargetReloType = RelocationType;
|
||||
Result.AddrType = isGV;
|
||||
Result.MayNeedFarStub = MayNeedFarStub;
|
||||
Result.GOTRelative = GOTrelative;
|
||||
Result.TargetResolve = false;
|
||||
Result.Target.GV = GV;
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// MachineRelocation::getIndirectSymbol - Return a relocation entry for an
|
||||
/// indirect symbol.
|
||||
static MachineRelocation getIndirectSymbol(uintptr_t offset,
|
||||
unsigned RelocationType,
|
||||
GlobalValue *GV, intptr_t cst = 0,
|
||||
bool MayNeedFarStub = 0,
|
||||
bool GOTrelative = 0) {
|
||||
assert((RelocationType & ~63) == 0 && "Relocation type too large!");
|
||||
MachineRelocation Result;
|
||||
Result.Offset = offset;
|
||||
Result.ConstantVal = cst;
|
||||
Result.TargetReloType = RelocationType;
|
||||
Result.AddrType = isIndirectSym;
|
||||
Result.MayNeedFarStub = MayNeedFarStub;
|
||||
Result.GOTRelative = GOTrelative;
|
||||
Result.TargetResolve = false;
|
||||
Result.Target.GV = GV;
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// MachineRelocation::getBB - Return a relocation entry for a BB.
|
||||
///
|
||||
static MachineRelocation getBB(uintptr_t offset,unsigned RelocationType,
|
||||
MachineBasicBlock *MBB, intptr_t cst = 0) {
|
||||
assert((RelocationType & ~63) == 0 && "Relocation type too large!");
|
||||
MachineRelocation Result;
|
||||
Result.Offset = offset;
|
||||
Result.ConstantVal = cst;
|
||||
Result.TargetReloType = RelocationType;
|
||||
Result.AddrType = isBB;
|
||||
Result.MayNeedFarStub = false;
|
||||
Result.GOTRelative = false;
|
||||
Result.TargetResolve = false;
|
||||
Result.Target.MBB = MBB;
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// MachineRelocation::getExtSym - Return a relocation entry for an external
|
||||
/// symbol, like "free".
|
||||
///
|
||||
static MachineRelocation getExtSym(uintptr_t offset, unsigned RelocationType,
|
||||
const char *ES, intptr_t cst = 0,
|
||||
bool GOTrelative = 0,
|
||||
bool NeedStub = true) {
|
||||
assert((RelocationType & ~63) == 0 && "Relocation type too large!");
|
||||
MachineRelocation Result;
|
||||
Result.Offset = offset;
|
||||
Result.ConstantVal = cst;
|
||||
Result.TargetReloType = RelocationType;
|
||||
Result.AddrType = isExtSym;
|
||||
Result.MayNeedFarStub = NeedStub;
|
||||
Result.GOTRelative = GOTrelative;
|
||||
Result.TargetResolve = false;
|
||||
Result.Target.ExtSym = ES;
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// MachineRelocation::getConstPool - Return a relocation entry for a constant
|
||||
/// pool entry.
|
||||
///
|
||||
static MachineRelocation getConstPool(uintptr_t offset,unsigned RelocationType,
|
||||
unsigned CPI, intptr_t cst = 0,
|
||||
bool letTargetResolve = false) {
|
||||
assert((RelocationType & ~63) == 0 && "Relocation type too large!");
|
||||
MachineRelocation Result;
|
||||
Result.Offset = offset;
|
||||
Result.ConstantVal = cst;
|
||||
Result.TargetReloType = RelocationType;
|
||||
Result.AddrType = isConstPool;
|
||||
Result.MayNeedFarStub = false;
|
||||
Result.GOTRelative = false;
|
||||
Result.TargetResolve = letTargetResolve;
|
||||
Result.Target.Index = CPI;
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// MachineRelocation::getJumpTable - Return a relocation entry for a jump
|
||||
/// table entry.
|
||||
///
|
||||
static MachineRelocation getJumpTable(uintptr_t offset,unsigned RelocationType,
|
||||
unsigned JTI, intptr_t cst = 0,
|
||||
bool letTargetResolve = false) {
|
||||
assert((RelocationType & ~63) == 0 && "Relocation type too large!");
|
||||
MachineRelocation Result;
|
||||
Result.Offset = offset;
|
||||
Result.ConstantVal = cst;
|
||||
Result.TargetReloType = RelocationType;
|
||||
Result.AddrType = isJumpTable;
|
||||
Result.MayNeedFarStub = false;
|
||||
Result.GOTRelative = false;
|
||||
Result.TargetResolve = letTargetResolve;
|
||||
Result.Target.Index = JTI;
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// getMachineCodeOffset - Return the offset into the code buffer at which the
/// relocation should be performed.
|
||||
intptr_t getMachineCodeOffset() const {
|
||||
return Offset;
|
||||
}
|
||||
|
||||
/// getRelocationType - Return the target-specific relocation ID for this
|
||||
/// relocation.
|
||||
unsigned getRelocationType() const {
|
||||
return TargetReloType;
|
||||
}
|
||||
|
||||
/// getConstantVal - Get the constant value associated with this relocation.
|
||||
/// This is often an offset from the symbol.
|
||||
///
|
||||
intptr_t getConstantVal() const {
|
||||
return ConstantVal;
|
||||
}
|
||||
|
||||
/// setConstantVal - Set the constant value associated with this relocation.
|
||||
/// This is often an offset from the symbol.
|
||||
///
|
||||
void setConstantVal(intptr_t val) {
|
||||
ConstantVal = val;
|
||||
}
|
||||
|
||||
/// isGlobalValue - Return true if this relocation is a GlobalValue, as
|
||||
/// opposed to a constant string.
|
||||
bool isGlobalValue() const {
|
||||
return AddrType == isGV;
|
||||
}
|
||||
|
||||
/// isIndirectSymbol - Return true if this relocation is the address of an
/// indirect symbol.
|
||||
bool isIndirectSymbol() const {
|
||||
return AddrType == isIndirectSym;
|
||||
}
|
||||
|
||||
/// isBasicBlock - Return true if this relocation is a basic block reference.
|
||||
///
|
||||
bool isBasicBlock() const {
|
||||
return AddrType == isBB;
|
||||
}
|
||||
|
||||
/// isExternalSymbol - Return true if this is a constant string.
|
||||
///
|
||||
bool isExternalSymbol() const {
|
||||
return AddrType == isExtSym;
|
||||
}
|
||||
|
||||
/// isConstantPoolIndex - Return true if this is a constant pool reference.
|
||||
///
|
||||
bool isConstantPoolIndex() const {
|
||||
return AddrType == isConstPool;
|
||||
}
|
||||
|
||||
/// isJumpTableIndex - Return true if this is a jump table reference.
|
||||
///
|
||||
bool isJumpTableIndex() const {
|
||||
return AddrType == isJumpTable;
|
||||
}
|
||||
|
||||
/// isGOTRelative - Return true if the target wants the index into the GOT of
/// the symbol rather than the address of the symbol.
|
||||
bool isGOTRelative() const {
|
||||
return GOTRelative;
|
||||
}
|
||||
|
||||
/// mayNeedFarStub - This function returns true if the JIT for this target may
|
||||
/// need either a stub function or an indirect global-variable load to handle
|
||||
/// the relocated GlobalValue reference. For example, the x86-64 call
|
||||
/// instruction can only call functions within +/-2GB of the call site.
|
||||
/// Anything farther away needs a longer mov+call sequence, which can't just
|
||||
/// be written on top of the existing call.
|
||||
bool mayNeedFarStub() const {
|
||||
return MayNeedFarStub;
|
||||
}
|
||||
|
||||
/// letTargetResolve - Return true if the target JITInfo is usually
|
||||
/// responsible for resolving the address of this relocation.
|
||||
bool letTargetResolve() const {
|
||||
return TargetResolve;
|
||||
}
|
||||
|
||||
/// getGlobalValue - If this is a global value reference, return the
|
||||
/// referenced global.
|
||||
GlobalValue *getGlobalValue() const {
|
||||
assert((isGlobalValue() || isIndirectSymbol()) &&
|
||||
"This is not a global value reference!");
|
||||
return Target.GV;
|
||||
}
|
||||
|
||||
MachineBasicBlock *getBasicBlock() const {
|
||||
assert(isBasicBlock() && "This is not a basic block reference!");
|
||||
return Target.MBB;
|
||||
}
|
||||
|
||||
/// getExternalSymbol - If this is an external symbol reference, return the
/// referenced symbol name.
///
|
||||
const char *getExternalSymbol() const {
|
||||
assert(isExternalSymbol() && "This is not an external symbol reference!");
|
||||
return Target.ExtSym;
|
||||
}
|
||||
|
||||
/// getConstantPoolIndex - If this is a const pool reference, return
|
||||
/// the index into the constant pool.
|
||||
unsigned getConstantPoolIndex() const {
|
||||
assert(isConstantPoolIndex() && "This is not a constant pool reference!");
|
||||
return Target.Index;
|
||||
}
|
||||
|
||||
/// getJumpTableIndex - If this is a jump table reference, return
|
||||
/// the index into the jump table.
|
||||
unsigned getJumpTableIndex() const {
|
||||
assert(isJumpTableIndex() && "This is not a jump table reference!");
|
||||
return Target.Index;
|
||||
}
|
||||
|
||||
/// getResultPointer - Once this has been resolved to point to an actual
|
||||
/// address, this returns the pointer.
|
||||
void *getResultPointer() const {
|
||||
assert(AddrType == isResult && "Result pointer isn't set yet!");
|
||||
return Target.Result;
|
||||
}
|
||||
|
||||
/// setResultPointer - Set the result to the specified pointer value.
|
||||
///
|
||||
void setResultPointer(void *Ptr) {
|
||||
Target.Result = Ptr;
|
||||
AddrType = isResult;
|
||||
}
|
||||
|
||||
/// setGOTIndex - Set the GOT index to a specific value.
|
||||
void setGOTIndex(unsigned idx) {
|
||||
AddrType = isGOTIndex;
|
||||
Target.GOTIndex = idx;
|
||||
}
|
||||
|
||||
/// getGOTIndex - Once this has been resolved to an entry in the GOT,
|
||||
/// this returns that index. The index is from the lowest address entry
|
||||
/// in the GOT.
|
||||
unsigned getGOTIndex() const {
|
||||
assert(AddrType == isGOTIndex);
|
||||
return Target.GOTIndex;
|
||||
}
|
||||
};
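// Editor's note: an illustrative sketch (not part of the original header).
// A code emitter typically records a relocation which the JIT later resolves;
// 'reloc_absolute_word' stands in for a hypothetical target relocation ID,
// and 'Offset', 'GV' and 'Addr' for values produced by the emitter:
//
//   MachineRelocation MR =
//     MachineRelocation::getGV(Offset, reloc_absolute_word, GV);
//   ...
//   if (MR.isGlobalValue())
//     MR.setResultPointer(Addr); // resolved address of MR.getGlobalValue()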
|
||||
}
|
||||
|
||||
#endif
|
||||
117
thirdparty/clang/include/llvm/CodeGen/MachineSSAUpdater.h
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
//===-- MachineSSAUpdater.h - Unstructured SSA Update Tool ------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file declares the MachineSSAUpdater class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
|
||||
#define LLVM_CODEGEN_MACHINESSAUPDATER_H
|
||||
|
||||
#include "llvm/Support/Compiler.h"
|
||||
|
||||
namespace llvm {
|
||||
class MachineBasicBlock;
|
||||
class MachineFunction;
|
||||
class MachineInstr;
|
||||
class MachineOperand;
|
||||
class MachineRegisterInfo;
|
||||
class TargetInstrInfo;
|
||||
class TargetRegisterClass;
|
||||
template<typename T> class SmallVectorImpl;
|
||||
template<typename T> class SSAUpdaterTraits;
|
||||
class BumpPtrAllocator;
|
||||
|
||||
/// MachineSSAUpdater - This class updates SSA form for a set of virtual
|
||||
/// registers defined in multiple blocks. This is used when code duplication
|
||||
/// or another unstructured transformation wants to rewrite a set of uses of one
|
||||
/// vreg with uses of a set of vregs.
|
||||
class MachineSSAUpdater {
|
||||
friend class SSAUpdaterTraits<MachineSSAUpdater>;
|
||||
|
||||
private:
|
||||
/// AvailableVals - This keeps track of which value to use on a per-block
|
||||
/// basis. When we insert PHI nodes, we keep track of them here.
|
||||
//typedef DenseMap<MachineBasicBlock*, unsigned > AvailableValsTy;
|
||||
void *AV;
|
||||
|
||||
/// VR - Current virtual register whose uses are being updated.
|
||||
unsigned VR;
|
||||
|
||||
/// VRC - Register class of the current virtual register.
|
||||
const TargetRegisterClass *VRC;
|
||||
|
||||
/// InsertedPHIs - If this is non-null, the MachineSSAUpdater adds all PHI
|
||||
/// nodes that it creates to the vector.
|
||||
SmallVectorImpl<MachineInstr*> *InsertedPHIs;
|
||||
|
||||
const TargetInstrInfo *TII;
|
||||
MachineRegisterInfo *MRI;
|
||||
public:
|
||||
/// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
|
||||
/// filled in with all PHI Nodes created by rewriting.
|
||||
explicit MachineSSAUpdater(MachineFunction &MF,
|
||||
SmallVectorImpl<MachineInstr*> *InsertedPHIs = 0);
|
||||
~MachineSSAUpdater();
|
||||
|
||||
/// Initialize - Reset this object to get ready for a new set of SSA
|
||||
/// updates.
|
||||
void Initialize(unsigned V);
|
||||
|
||||
/// AddAvailableValue - Indicate that a rewritten value is available at the
|
||||
/// end of the specified block with the specified value.
|
||||
void AddAvailableValue(MachineBasicBlock *BB, unsigned V);
|
||||
|
||||
/// HasValueForBlock - Return true if the MachineSSAUpdater already has a
|
||||
/// value for the specified block.
|
||||
bool HasValueForBlock(MachineBasicBlock *BB) const;
|
||||
|
||||
/// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
|
||||
/// live at the end of the specified block.
|
||||
unsigned GetValueAtEndOfBlock(MachineBasicBlock *BB);
|
||||
|
||||
/// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
|
||||
/// is live in the middle of the specified block.
|
||||
///
|
||||
/// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
|
||||
/// important case: if there is a definition of the rewritten value after the
|
||||
/// 'use' in BB. Consider code like this:
|
||||
///
|
||||
/// X1 = ...
|
||||
/// SomeBB:
|
||||
/// use(X)
|
||||
/// X2 = ...
|
||||
/// br Cond, SomeBB, OutBB
|
||||
///
|
||||
/// In this case, there are two values (X1 and X2) added to the AvailableVals
|
||||
/// set by the client of the rewriter, and those values are both live out of
|
||||
/// their respective blocks. However, the use of X happens in the *middle* of
|
||||
/// a block. Because of this, we need to insert a new PHI node in SomeBB to
|
||||
/// merge the appropriate values, and this value isn't live out of the block.
|
||||
///
|
||||
unsigned GetValueInMiddleOfBlock(MachineBasicBlock *BB);
|
||||
|
||||
/// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
|
||||
/// which use their value in the corresponding predecessor. Note that this
|
||||
/// will not work if the use is supposed to be rewritten to a value defined in
|
||||
/// the same block as the use, but above it. Any 'AddAvailableValue's added
|
||||
/// for the use's block will be considered to be below it.
|
||||
void RewriteUse(MachineOperand &U);
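// Editor's note: an illustrative sketch (not part of the original header) of
// the usual update sequence; MF, OrigVReg, the blocks and the new vregs are
// assumed to be provided by the transformation being performed:
//
//   MachineSSAUpdater SSAUpdate(MF);
//   SSAUpdate.Initialize(OrigVReg);
//   SSAUpdate.AddAvailableValue(BB1, NewVReg1);
//   SSAUpdate.AddAvailableValue(BB2, NewVReg2);
//   SSAUpdate.RewriteUse(UseMO);   // PHI nodes are inserted where needed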
|
||||
|
||||
private:
|
||||
void ReplaceRegWith(unsigned OldReg, unsigned NewReg);
|
||||
unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
|
||||
|
||||
void operator=(const MachineSSAUpdater&) LLVM_DELETED_FUNCTION;
|
||||
MachineSSAUpdater(const MachineSSAUpdater&) LLVM_DELETED_FUNCTION;
|
||||
};
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
382
thirdparty/clang/include/llvm/CodeGen/MachineScheduler.h
vendored
Normal file
@@ -0,0 +1,382 @@
|
||||
//==- MachineScheduler.h - MachineInstr Scheduling Pass ----------*- C++ -*-==//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file provides a MachineSchedRegistry for registering alternative machine
|
||||
// schedulers. A Target may provide an alternative scheduler implementation by
|
||||
// implementing the following boilerplate:
|
||||
//
|
||||
// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
|
||||
// return new CustomMachineScheduler(C);
|
||||
// }
|
||||
// static MachineSchedRegistry
|
||||
// SchedCustomRegistry("custom", "Run my target's custom scheduler",
|
||||
// createCustomMachineSched);
|
||||
//
|
||||
// Inside <Target>PassConfig:
|
||||
// enablePass(&MachineSchedulerID);
|
||||
// MachineSchedRegistry::setDefault(createCustomMachineSched);
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
|
||||
#define LLVM_CODEGEN_MACHINESCHEDULER_H
|
||||
|
||||
#include "llvm/CodeGen/MachinePassRegistry.h"
|
||||
#include "llvm/CodeGen/RegisterPressure.h"
|
||||
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
|
||||
#include "llvm/Target/TargetInstrInfo.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
extern cl::opt<bool> ForceTopDown;
|
||||
extern cl::opt<bool> ForceBottomUp;
|
||||
|
||||
class AliasAnalysis;
|
||||
class LiveIntervals;
|
||||
class MachineDominatorTree;
|
||||
class MachineLoopInfo;
|
||||
class RegisterClassInfo;
|
||||
class ScheduleDAGInstrs;
|
||||
class SchedDFSResult;
|
||||
|
||||
/// MachineSchedContext provides enough context from the MachineScheduler pass
|
||||
/// for the target to instantiate a scheduler.
|
||||
struct MachineSchedContext {
|
||||
MachineFunction *MF;
|
||||
const MachineLoopInfo *MLI;
|
||||
const MachineDominatorTree *MDT;
|
||||
const TargetPassConfig *PassConfig;
|
||||
AliasAnalysis *AA;
|
||||
LiveIntervals *LIS;
|
||||
|
||||
RegisterClassInfo *RegClassInfo;
|
||||
|
||||
MachineSchedContext();
|
||||
virtual ~MachineSchedContext();
|
||||
};
|
||||
|
||||
/// MachineSchedRegistry provides a selection of available machine instruction
|
||||
/// schedulers.
|
||||
class MachineSchedRegistry : public MachinePassRegistryNode {
|
||||
public:
|
||||
typedef ScheduleDAGInstrs *(*ScheduleDAGCtor)(MachineSchedContext *);
|
||||
|
||||
// RegisterPassParser requires a (misnamed) FunctionPassCtor type.
|
||||
typedef ScheduleDAGCtor FunctionPassCtor;
|
||||
|
||||
static MachinePassRegistry Registry;
|
||||
|
||||
MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
|
||||
: MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
|
||||
Registry.Add(this);
|
||||
}
|
||||
~MachineSchedRegistry() { Registry.Remove(this); }
|
||||
|
||||
// Accessors.
|
||||
//
|
||||
MachineSchedRegistry *getNext() const {
|
||||
return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
|
||||
}
|
||||
static MachineSchedRegistry *getList() {
|
||||
return (MachineSchedRegistry *)Registry.getList();
|
||||
}
|
||||
static ScheduleDAGCtor getDefault() {
|
||||
return (ScheduleDAGCtor)Registry.getDefault();
|
||||
}
|
||||
static void setDefault(ScheduleDAGCtor C) {
|
||||
Registry.setDefault((MachinePassCtor)C);
|
||||
}
|
||||
static void setDefault(StringRef Name) {
|
||||
Registry.setDefault(Name);
|
||||
}
|
||||
static void setListener(MachinePassRegistryListener *L) {
|
||||
Registry.setListener(L);
|
||||
}
|
||||
};
|
||||
|
||||
class ScheduleDAGMI;
|
||||
|
||||
/// MachineSchedStrategy - Interface to the scheduling algorithm used by
|
||||
/// ScheduleDAGMI.
|
||||
class MachineSchedStrategy {
|
||||
public:
|
||||
virtual ~MachineSchedStrategy() {}
|
||||
|
||||
/// Initialize the strategy after building the DAG for a new region.
|
||||
virtual void initialize(ScheduleDAGMI *DAG) = 0;
|
||||
|
||||
/// Notify this strategy that all roots have been released (including those
|
||||
/// that depend on EntrySU or ExitSU).
|
||||
virtual void registerRoots() {}
|
||||
|
||||
/// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
|
||||
/// schedule the node at the top of the unscheduled region. Otherwise it will
|
||||
/// be scheduled at the bottom.
|
||||
virtual SUnit *pickNode(bool &IsTopNode) = 0;
|
||||
|
||||
/// \brief Scheduler callback to notify that a new subtree is scheduled.
|
||||
virtual void scheduleTree(unsigned SubtreeID) {}
|
||||
|
||||
/// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
|
||||
/// instruction and updated scheduled/remaining flags in the DAG nodes.
|
||||
virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
|
||||
|
||||
/// When all predecessor dependencies have been resolved, free this node for
|
||||
/// top-down scheduling.
|
||||
virtual void releaseTopNode(SUnit *SU) = 0;
|
||||
/// When all successor dependencies have been resolved, free this node for
|
||||
/// bottom-up scheduling.
|
||||
virtual void releaseBottomNode(SUnit *SU) = 0;
|
||||
};
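// Editor's note: a minimal strategy skeleton (not part of the original
// header), kept deliberately naive: it schedules ready nodes top-down in
// whatever order they were released. A real strategy would also track
// register pressure and latency:
//
//   struct NaiveSchedStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> Ready;
//     virtual void initialize(ScheduleDAGMI *DAG) { Ready.clear(); }
//     virtual void releaseTopNode(SUnit *SU) { Ready.push_back(SU); }
//     virtual void releaseBottomNode(SUnit *SU) {}
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       if (Ready.empty()) return NULL;
//       IsTopNode = true;
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
//   };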
|
||||
|
||||
/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
|
||||
///
|
||||
/// This is a convenience class that may be used by implementations of
|
||||
/// MachineSchedStrategy.
|
||||
class ReadyQueue {
|
||||
unsigned ID;
|
||||
std::string Name;
|
||||
std::vector<SUnit*> Queue;
|
||||
|
||||
public:
|
||||
ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
|
||||
|
||||
unsigned getID() const { return ID; }
|
||||
|
||||
StringRef getName() const { return Name; }
|
||||
|
||||
// SU is in this queue if its NodeQueueId is a superset of this ID.
|
||||
bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
|
||||
|
||||
bool empty() const { return Queue.empty(); }
|
||||
|
||||
void clear() { Queue.clear(); }
|
||||
|
||||
unsigned size() const { return Queue.size(); }
|
||||
|
||||
typedef std::vector<SUnit*>::iterator iterator;
|
||||
|
||||
iterator begin() { return Queue.begin(); }
|
||||
|
||||
iterator end() { return Queue.end(); }
|
||||
|
||||
ArrayRef<SUnit*> elements() { return Queue; }
|
||||
|
||||
iterator find(SUnit *SU) {
|
||||
return std::find(Queue.begin(), Queue.end(), SU);
|
||||
}
|
||||
|
||||
void push(SUnit *SU) {
|
||||
Queue.push_back(SU);
|
||||
SU->NodeQueueId |= ID;
|
||||
}
|
||||
|
||||
iterator remove(iterator I) {
|
||||
(*I)->NodeQueueId &= ~ID;
|
||||
*I = Queue.back();
|
||||
unsigned idx = I - Queue.begin();
|
||||
Queue.pop_back();
|
||||
return Queue.begin() + idx;
|
||||
}
|
||||
|
||||
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
||||
void dump();
|
||||
#endif
|
||||
};
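// Editor's note: an illustrative sketch (not part of the original header) of
// how a strategy might scan a ReadyQueue; 'isReady' is a hypothetical
// predicate belonging to the strategy, not to this class:
//
//   ReadyQueue Q(/*id=*/1, "top");   // IDs should be distinct bit masks
//   Q.push(SU);
//   for (ReadyQueue::iterator I = Q.begin(); I != Q.end();) {
//     if (isReady(*I))
//       I = Q.remove(I);   // remove() returns the position to re-examine
//     else
//       ++I;
//   }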
|
||||
|
||||
/// Mutate the DAG as a postpass after normal DAG building.
|
||||
class ScheduleDAGMutation {
|
||||
public:
|
||||
virtual ~ScheduleDAGMutation() {}
|
||||
|
||||
virtual void apply(ScheduleDAGMI *DAG) = 0;
|
||||
};
|
||||
|
||||
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
|
||||
/// machine instructions while updating LiveIntervals and tracking regpressure.
|
||||
class ScheduleDAGMI : public ScheduleDAGInstrs {
|
||||
protected:
|
||||
AliasAnalysis *AA;
|
||||
RegisterClassInfo *RegClassInfo;
|
||||
MachineSchedStrategy *SchedImpl;
|
||||
|
||||
/// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
|
||||
/// will be empty.
|
||||
SchedDFSResult *DFSResult;
|
||||
BitVector ScheduledTrees;
|
||||
|
||||
/// Topo - A topological ordering for SUnits which permits fast IsReachable
|
||||
/// and similar queries.
|
||||
ScheduleDAGTopologicalSort Topo;
|
||||
|
||||
/// Ordered list of DAG postprocessing steps.
|
||||
std::vector<ScheduleDAGMutation*> Mutations;
|
||||
|
||||
MachineBasicBlock::iterator LiveRegionEnd;
|
||||
|
||||
/// Register pressure in this region computed by buildSchedGraph.
|
||||
IntervalPressure RegPressure;
|
||||
RegPressureTracker RPTracker;
|
||||
|
||||
/// List of pressure sets that exceed the target's pressure limit before
|
||||
/// scheduling, listed in increasing set ID order. Each pressure set is paired
|
||||
/// with its max pressure in the currently scheduled regions.
|
||||
std::vector<PressureElement> RegionCriticalPSets;
|
||||
|
||||
/// The top of the unscheduled zone.
|
||||
MachineBasicBlock::iterator CurrentTop;
|
||||
IntervalPressure TopPressure;
|
||||
RegPressureTracker TopRPTracker;
|
||||
|
||||
/// The bottom of the unscheduled zone.
|
||||
MachineBasicBlock::iterator CurrentBottom;
|
||||
IntervalPressure BotPressure;
|
||||
RegPressureTracker BotRPTracker;
|
||||
|
||||
/// Record the next node in a scheduled cluster.
|
||||
const SUnit *NextClusterPred;
|
||||
const SUnit *NextClusterSucc;
|
||||
|
||||
#ifndef NDEBUG
|
||||
/// The number of instructions scheduled so far. Used to cut off the
|
||||
/// scheduler at the point determined by misched-cutoff.
|
||||
unsigned NumInstrsScheduled;
|
||||
#endif
|
||||
|
||||
public:
|
||||
ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
|
||||
ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
|
||||
AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S), DFSResult(0),
|
||||
Topo(SUnits, &ExitSU), RPTracker(RegPressure), CurrentTop(),
|
||||
TopRPTracker(TopPressure), CurrentBottom(), BotRPTracker(BotPressure),
|
||||
NextClusterPred(NULL), NextClusterSucc(NULL) {
|
||||
#ifndef NDEBUG
|
||||
NumInstrsScheduled = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
virtual ~ScheduleDAGMI();
|
||||
|
||||
/// Add a postprocessing step to the DAG builder.
|
||||
/// Mutations are applied in the order that they are added after normal DAG
|
||||
/// building and before MachineSchedStrategy initialization.
|
||||
///
|
||||
/// ScheduleDAGMI takes ownership of the Mutation object.
|
||||
void addMutation(ScheduleDAGMutation *Mutation) {
|
||||
Mutations.push_back(Mutation);
|
||||
}
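// Editor's note: an illustrative sketch (not part of the original header);
// 'MyClusterMutation' and 'MyStrategy' are hypothetical classes and 'C' an
// existing MachineSchedContext:
//
//   struct MyClusterMutation : public ScheduleDAGMutation {
//     virtual void apply(ScheduleDAGMI *DAG) {
//       // e.g. add cluster edges with DAG->addEdge(SuccSU, PredDep)
//     }
//   };
//
//   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new MyStrategy());
//   DAG->addMutation(new MyClusterMutation());  // DAG takes ownership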
|
||||
|
||||
/// \brief Add a DAG edge to the given SU with the given predecessor
|
||||
/// dependence data.
|
||||
///
|
||||
/// \returns true if the edge may be added without creating a cycle OR if an
|
||||
/// equivalent edge already existed (false indicates failure).
|
||||
bool addEdge(SUnit *SuccSU, const SDep &PredDep);
|
||||
|
||||
MachineBasicBlock::iterator top() const { return CurrentTop; }
|
||||
MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
|
||||
|
||||
/// Implement the ScheduleDAGInstrs interface for handling the next scheduling
|
||||
/// region. This covers all instructions in a block, while schedule() may only
|
||||
/// cover a subset.
|
||||
void enterRegion(MachineBasicBlock *bb,
|
||||
MachineBasicBlock::iterator begin,
|
||||
MachineBasicBlock::iterator end,
|
||||
unsigned endcount);
|
||||
|
||||
|
||||
/// Implement ScheduleDAGInstrs interface for scheduling a sequence of
|
||||
/// reorderable instructions.
|
||||
virtual void schedule();
|
||||
|
||||
/// Change the position of an instruction within the basic block and update
|
||||
/// live ranges and region boundary iterators.
|
||||
void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
|
||||
|
||||
/// Get current register pressure for the top scheduled instructions.
|
||||
const IntervalPressure &getTopPressure() const { return TopPressure; }
|
||||
const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
|
||||
|
||||
/// Get current register pressure for the bottom scheduled instructions.
|
||||
const IntervalPressure &getBotPressure() const { return BotPressure; }
|
||||
const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
|
||||
|
||||
/// Get register pressure for the entire scheduling region before scheduling.
|
||||
const IntervalPressure &getRegPressure() const { return RegPressure; }
|
||||
|
||||
const std::vector<PressureElement> &getRegionCriticalPSets() const {
|
||||
return RegionCriticalPSets;
|
||||
}
|
||||
|
||||
const SUnit *getNextClusterPred() const { return NextClusterPred; }
|
||||
|
||||
const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
|
||||
|
||||
/// Compute a DFSResult after DAG building is complete, and before any
|
||||
/// queue comparisons.
|
||||
void computeDFSResult();
|
||||
|
||||
/// Return a non-null DFS result if the scheduling strategy initialized it.
|
||||
const SchedDFSResult *getDFSResult() const { return DFSResult; }
|
||||
|
||||
BitVector &getScheduledTrees() { return ScheduledTrees; }
|
||||
|
||||
void viewGraph(const Twine &Name, const Twine &Title) LLVM_OVERRIDE;
|
||||
void viewGraph() LLVM_OVERRIDE;
|
||||
|
||||
protected:
|
||||
// Top-Level entry points for the schedule() driver...
|
||||
|
||||
/// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
/// enabled. This sets up three trackers. RPTracker will cover the entire DAG
/// region, TopTracker and BottomTracker will be initialized to the top and
/// bottom of the DAG region without covering any unscheduled instruction.
|
||||
void buildDAGWithRegPressure();
|
||||
|
||||
/// Apply each ScheduleDAGMutation step in order. This allows different
|
||||
/// instances of ScheduleDAGMI to perform custom DAG postprocessing.
|
||||
void postprocessDAG();
|
||||
|
||||
/// Release ExitSU predecessors and setup scheduler queues.
|
||||
void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
|
||||
|
||||
/// Move an instruction and update register pressure.
|
||||
void scheduleMI(SUnit *SU, bool IsTopNode);
|
||||
|
||||
/// Update scheduler DAG and queues after scheduling an instruction.
|
||||
void updateQueues(SUnit *SU, bool IsTopNode);
|
||||
|
||||
/// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
|
||||
void placeDebugValues();
|
||||
|
||||
/// \brief dump the scheduled Sequence.
|
||||
void dumpSchedule() const;
|
||||
|
||||
// Lesser helpers...
|
||||
|
||||
void initRegPressure();
|
||||
|
||||
void updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure);
|
||||
|
||||
bool checkSchedLimit();
|
||||
|
||||
void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
|
||||
SmallVectorImpl<SUnit*> &BotRoots);
|
||||
|
||||
void releaseSucc(SUnit *SU, SDep *SuccEdge);
|
||||
void releaseSuccessors(SUnit *SU);
|
||||
void releasePred(SUnit *SU, SDep *PredEdge);
|
||||
void releasePredecessors(SUnit *SU);
|
||||
};
|
||||
|
||||
} // namespace llvm
|
||||
|
||||
#endif
|
||||
388
thirdparty/clang/include/llvm/CodeGen/MachineTraceMetrics.h
vendored
Normal file
@@ -0,0 +1,388 @@
|
||||
//===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines the interface for the MachineTraceMetrics analysis pass
|
||||
// that estimates CPU resource usage and critical data dependency paths through
|
||||
// preferred traces. This is useful for super-scalar CPUs where execution speed
|
||||
// can be limited both by data dependencies and by limited execution resources.
|
||||
//
|
||||
// Out-of-order CPUs will often be executing instructions from multiple basic
|
||||
// blocks at the same time. This makes it difficult to estimate the resource
|
||||
// usage accurately in a single basic block. Resources can be estimated better
|
||||
// by looking at a trace through the current basic block.
|
||||
//
|
||||
// For every block, the MachineTraceMetrics pass will pick a preferred trace
|
||||
// that passes through the block. The trace is chosen based on loop structure,
|
||||
// branch probabilities, and resource usage. The intention is to pick likely
|
||||
// traces that would be the most affected by code transformations.
|
||||
//
|
||||
// It is expensive to compute a full arbitrary trace for every block, so to
|
||||
// save some computations, traces are chosen to be convergent. This means that
|
||||
// if the traces through basic blocks A and B ever cross when moving away from
|
||||
// A and B, they never diverge again. This applies in both directions - If the
|
||||
// traces meet above A and B, they won't diverge when going further back.
|
||||
//
|
||||
// Traces tend to align with loops. The trace through a block in an inner loop
|
||||
// will begin at the loop entry block and end at a back edge. If there are
|
||||
// nested loops, the trace may begin and end at those instead.
|
||||
//
|
||||
// For each trace, we compute the critical path length, which is the number of
|
||||
// cycles required to execute the trace when execution is limited by data
|
||||
// dependencies only. We also compute the resource height, which is the number
|
||||
// of cycles required to execute all instructions in the trace when ignoring
|
||||
// data dependencies.
|
||||
//
|
||||
// Every instruction in the current block has a slack - the number of cycles
|
||||
// execution of the instruction can be delayed without extending the critical
|
||||
// path.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_MACHINE_TRACE_METRICS_H
|
||||
#define LLVM_CODEGEN_MACHINE_TRACE_METRICS_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/TargetSchedule.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class InstrItineraryData;
|
||||
class MachineBasicBlock;
|
||||
class MachineInstr;
|
||||
class MachineLoop;
|
||||
class MachineLoopInfo;
|
||||
class MachineRegisterInfo;
|
||||
class TargetInstrInfo;
|
||||
class TargetRegisterInfo;
|
||||
class raw_ostream;
|
||||
|
||||
class MachineTraceMetrics : public MachineFunctionPass {
|
||||
const MachineFunction *MF;
|
||||
const TargetInstrInfo *TII;
|
||||
const TargetRegisterInfo *TRI;
|
||||
const MachineRegisterInfo *MRI;
|
||||
const MachineLoopInfo *Loops;
|
||||
TargetSchedModel SchedModel;
|
||||
|
||||
public:
|
||||
class Ensemble;
|
||||
class Trace;
|
||||
static char ID;
|
||||
MachineTraceMetrics();
|
||||
void getAnalysisUsage(AnalysisUsage&) const;
|
||||
bool runOnMachineFunction(MachineFunction&);
|
||||
void releaseMemory();
|
||||
void verifyAnalysis() const;
|
||||
|
||||
friend class Ensemble;
|
||||
friend class Trace;
|
||||
|
||||
/// Per-basic block information that doesn't depend on the trace through the
|
||||
/// block.
|
||||
struct FixedBlockInfo {
|
||||
/// The number of non-trivial instructions in the block.
|
||||
/// Doesn't count PHI and COPY instructions that are likely to be removed.
|
||||
unsigned InstrCount;
|
||||
|
||||
/// True when the block contains calls.
|
||||
bool HasCalls;
|
||||
|
||||
FixedBlockInfo() : InstrCount(~0u), HasCalls(false) {}
|
||||
|
||||
/// Returns true when resource information for this block has been computed.
|
||||
bool hasResources() const { return InstrCount != ~0u; }
|
||||
|
||||
/// Invalidate resource information.
|
||||
void invalidate() { InstrCount = ~0u; }
|
||||
};
|
||||
|
||||
/// Get the fixed resource information about MBB. Compute it on demand.
|
||||
const FixedBlockInfo *getResources(const MachineBasicBlock*);
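// Editor's note: an illustrative sketch (not part of the original header) of
// querying per-block resources from a pass that has required this analysis;
// 'MBB' and 'Threshold' are assumed to come from the calling pass:
//
//   MachineTraceMetrics &MTM = getAnalysis<MachineTraceMetrics>();
//   const MachineTraceMetrics::FixedBlockInfo *FBI = MTM.getResources(MBB);
//   if (FBI->hasResources() && FBI->InstrCount > Threshold) {
//     // block is large enough to be worth transforming
//   }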
|
||||
|
||||
/// Get the scaled number of cycles used per processor resource in MBB.
|
||||
/// This is an array with SchedModel.getNumProcResourceKinds() entries.
|
||||
/// The getResources() function above must have been called first.
|
||||
///
|
||||
/// These numbers have already been scaled by SchedModel.getResourceFactor().
|
||||
ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;
|
||||
|
||||
/// A virtual register or regunit required by a basic block or its trace
|
||||
/// successors.
|
||||
struct LiveInReg {
|
||||
/// The virtual register required, or a register unit.
|
||||
unsigned Reg;
|
||||
|
||||
/// For virtual registers: Minimum height of the defining instruction.
|
||||
/// For regunits: Height of the highest user in the trace.
|
||||
unsigned Height;
|
||||
|
||||
LiveInReg(unsigned Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
|
||||
};
|
||||
|
||||
/// Per-basic block information that relates to a specific trace through the
|
||||
/// block. Convergent traces means that only one of these is required per
|
||||
/// block in a trace ensemble.
|
||||
struct TraceBlockInfo {
|
||||
/// Trace predecessor, or NULL for the first block in the trace.
|
||||
/// Valid when hasValidDepth().
|
||||
const MachineBasicBlock *Pred;
|
||||
|
||||
/// Trace successor, or NULL for the last block in the trace.
|
||||
/// Valid when hasValidHeight().
|
||||
const MachineBasicBlock *Succ;
|
||||
|
||||
/// The block number of the head of the trace. (When hasValidDepth()).
|
||||
unsigned Head;
|
||||
|
||||
/// The block number of the tail of the trace. (When hasValidHeight()).
|
||||
unsigned Tail;
|
||||
|
||||
/// Accumulated number of instructions in the trace above this block.
|
||||
/// Does not include instructions in this block.
|
||||
unsigned InstrDepth;
|
||||
|
||||
/// Accumulated number of instructions in the trace below this block.
|
||||
/// Includes instructions in this block.
|
||||
unsigned InstrHeight;
|
||||
|
||||
TraceBlockInfo() :
|
||||
Pred(0), Succ(0),
|
||||
InstrDepth(~0u), InstrHeight(~0u),
|
||||
HasValidInstrDepths(false), HasValidInstrHeights(false) {}
|
||||
|
||||
/// Returns true if the depth resources have been computed from the trace
|
||||
/// above this block.
|
||||
bool hasValidDepth() const { return InstrDepth != ~0u; }
|
||||
|
||||
/// Returns true if the height resources have been computed from the trace
|
||||
/// below this block.
|
||||
bool hasValidHeight() const { return InstrHeight != ~0u; }
|
||||
|
||||
/// Invalidate depth resources when some block above this one has changed.
|
||||
void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }
|
||||
|
||||
/// Invalidate height resources when a block below this one has changed.
|
||||
void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
|
||||
|
||||
/// Assuming that this is a dominator of TBI, determine if it contains
|
||||
/// useful instruction depths. A dominating block can be above the current
|
||||
/// trace head, and any dependencies from such a far away dominator are not
|
||||
/// expected to affect the critical path.
|
||||
///
|
||||
/// Also returns true when TBI == this.
|
||||
bool isUsefulDominator(const TraceBlockInfo &TBI) const {
|
||||
// The trace for TBI may not even be calculated yet.
|
||||
if (!hasValidDepth() || !TBI.hasValidDepth())
|
||||
return false;
|
||||
// Instruction depths are only comparable if the traces share a head.
|
||||
if (Head != TBI.Head)
|
||||
return false;
|
||||
// It is almost always the case that TBI belongs to the same trace as
|
||||
// this block, but in rare convoluted cases involving irreducible control
|
||||
// flow, a dominator may share a trace head without actually being on the
|
||||
// same trace as TBI. This is not a big problem as long as it doesn't
|
||||
// increase the instruction depth.
|
||||
return HasValidInstrDepths && InstrDepth <= TBI.InstrDepth;
|
||||
}
|
||||
|
||||
// Data-dependency-related information. Per-instruction depth and height
|
||||
// are computed from data dependencies in the current trace, using
|
||||
// itinerary data.
|
||||
|
||||
/// Instruction depths have been computed. This implies hasValidDepth().
|
||||
bool HasValidInstrDepths;
|
||||
|
||||
/// Instruction heights have been computed. This implies hasValidHeight().
|
||||
bool HasValidInstrHeights;
|
||||
|
||||
/// Critical path length. This is the number of cycles in the longest data
|
||||
/// dependency chain through the trace. This is only valid when both
|
||||
/// HasValidInstrDepths and HasValidInstrHeights are set.
|
||||
unsigned CriticalPath;
|
||||
|
||||
/// Live-in registers. These registers are defined above the current block
|
||||
/// and used by this block or a block below it.
|
||||
/// This does not include PHI uses in the current block, but it does
|
||||
/// include PHI uses in deeper blocks.
|
||||
SmallVector<LiveInReg, 4> LiveIns;
|
||||
|
||||
void print(raw_ostream&) const;
|
||||
};
|
||||
|
||||
/// InstrCycles represents the cycle height and depth of an instruction in a
|
||||
/// trace.
|
||||
struct InstrCycles {
|
||||
/// Earliest issue cycle as determined by data dependencies and instruction
|
||||
/// latencies from the beginning of the trace. Data dependencies from
|
||||
/// before the trace are not included.
|
||||
unsigned Depth;
|
||||
|
||||
/// Minimum number of cycles from when this instruction is issued to the end of the
|
||||
/// trace, as determined by data dependencies and instruction latencies.
|
||||
unsigned Height;
|
||||
};
|
||||
|
||||
/// A trace represents a plausible sequence of executed basic blocks that
|
||||
/// passes through the current basic block once. The Trace class serves as a
|
||||
/// handle to internal cached data structures.
|
||||
class Trace {
|
||||
Ensemble &TE;
|
||||
TraceBlockInfo &TBI;
|
||||
|
||||
unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }
|
||||
|
||||
public:
|
||||
explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}
|
||||
void print(raw_ostream&) const;
|
||||
|
||||
/// Compute the total number of instructions in the trace.
|
||||
unsigned getInstrCount() const {
|
||||
return TBI.InstrDepth + TBI.InstrHeight;
|
||||
}
|
||||
|
||||
/// Return the resource depth of the top/bottom of the trace center block.
|
||||
/// This is the number of cycles required to execute all instructions from
|
||||
/// the trace head to the trace center block. The resource depth only
|
||||
/// considers execution resources, it ignores data dependencies.
|
||||
/// When Bottom is set, instructions in the trace center block are included.
|
||||
unsigned getResourceDepth(bool Bottom) const;
|
||||
|
||||
/// Return the resource length of the trace. This is the number of cycles
|
||||
/// required to execute the instructions in the trace if they were all
|
||||
/// independent, exposing the maximum instruction-level parallelism.
|
||||
///
|
||||
/// Any blocks in Extrablocks are included as if they were part of the
|
||||
/// trace.
|
||||
unsigned getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks =
|
||||
ArrayRef<const MachineBasicBlock*>()) const;
|
||||
|
||||
/// Return the length of the (data dependency) critical path through the
|
||||
/// trace.
|
||||
unsigned getCriticalPath() const { return TBI.CriticalPath; }
|
||||
|
||||
/// Return the depth and height of MI. The depth is only valid for
|
||||
/// instructions in or above the trace center block. The height is only
|
||||
/// valid for instructions in or below the trace center block.
|
||||
InstrCycles getInstrCycles(const MachineInstr *MI) const {
|
||||
return TE.Cycles.lookup(MI);
|
||||
}
|
||||
|
||||
/// Return the slack of MI. This is the number of cycles MI can be delayed
|
||||
/// before the critical path becomes longer.
|
||||
/// MI must be an instruction in the trace center block.
|
||||
unsigned getInstrSlack(const MachineInstr *MI) const;
|
||||
|
||||
/// Return the Depth of a PHI instruction in a trace center block successor.
|
||||
/// The PHI does not have to be part of the trace.
|
||||
unsigned getPHIDepth(const MachineInstr *PHI) const;
|
||||
};
|
||||
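// Illustrative client usage (a sketch, not part of the original interface):
// a machine function pass that has required this analysis could check the
// slack of an instruction MI in block MBB like this:
//
//   MachineTraceMetrics &MTM = getAnalysis<MachineTraceMetrics>();
//   MachineTraceMetrics::Ensemble *E =
//       MTM.getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
//   MachineTraceMetrics::Trace T = E->getTrace(MBB);
//   if (T.getInstrSlack(MI) == 0) {
//     // MI is on the critical path of the trace through MBB.
//   }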
|
||||
/// A trace ensemble is a collection of traces selected using the same
|
||||
/// strategy, for example 'minimum resource height'. There is one trace for
|
||||
/// every block in the function.
|
||||
class Ensemble {
|
||||
SmallVector<TraceBlockInfo, 4> BlockInfo;
|
||||
DenseMap<const MachineInstr*, InstrCycles> Cycles;
|
||||
SmallVector<unsigned, 0> ProcResourceDepths;
|
||||
SmallVector<unsigned, 0> ProcResourceHeights;
|
||||
friend class Trace;
|
||||
|
||||
void computeTrace(const MachineBasicBlock*);
|
||||
void computeDepthResources(const MachineBasicBlock*);
|
||||
void computeHeightResources(const MachineBasicBlock*);
|
||||
unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
|
||||
void computeInstrDepths(const MachineBasicBlock*);
|
||||
void computeInstrHeights(const MachineBasicBlock*);
|
||||
void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
|
||||
ArrayRef<const MachineBasicBlock*> Trace);
|
||||
|
||||
protected:
|
||||
MachineTraceMetrics &MTM;
|
||||
virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
|
||||
virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
|
||||
explicit Ensemble(MachineTraceMetrics*);
|
||||
const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
|
||||
const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
|
||||
const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
|
||||
ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
|
||||
ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;
|
||||
|
||||
public:
|
||||
virtual ~Ensemble();
|
||||
virtual const char *getName() const =0;
|
||||
void print(raw_ostream&) const;
|
||||
void invalidate(const MachineBasicBlock *MBB);
|
||||
void verify() const;
|
||||
|
||||
/// Get the trace that passes through MBB.
|
||||
/// The trace is computed on demand.
|
||||
Trace getTrace(const MachineBasicBlock *MBB);
|
||||
};
|
||||
|
||||
/// Strategies for selecting traces.
|
||||
enum Strategy {
|
||||
/// Select the trace through a block that has the fewest instructions.
|
||||
TS_MinInstrCount,
|
||||
|
||||
TS_NumStrategies
|
||||
};
|
||||
|
||||
/// Get the trace ensemble representing the given trace selection strategy.
|
||||
/// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
|
||||
/// and valid for the lifetime of the analysis pass.
|
||||
Ensemble *getEnsemble(Strategy);
|
||||
|
||||
/// Invalidate cached information about MBB. This must be called *before* MBB
|
||||
/// is erased, or the CFG is otherwise changed.
|
||||
///
|
||||
/// This invalidates per-block information about resource usage for MBB only,
|
||||
/// and it invalidates per-trace information for any trace that passes
|
||||
/// through MBB.
|
||||
///
|
||||
/// Call Ensemble::getTrace() again to update any trace handles.
|
||||
void invalidate(const MachineBasicBlock *MBB);
|
||||
|
||||
private:
|
||||
// One entry per basic block, indexed by block number.
|
||||
SmallVector<FixedBlockInfo, 4> BlockInfo;
|
||||
|
||||
// Cycles consumed on each processor resource per block.
|
||||
// The number of processor resource kinds is constant for a given subtarget,
|
||||
// but it is not known at compile time. The number of cycles consumed by
|
||||
// block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
|
||||
// where Kinds = SchedModel.getNumProcResourceKinds().
|
||||
SmallVector<unsigned, 0> ProcResourceCycles;
|
||||
|
||||
// One ensemble per strategy.
|
||||
Ensemble* Ensembles[TS_NumStrategies];
|
||||
|
||||
// Convert scaled resource usage to a cycle count that can be compared with
|
||||
// latencies.
|
||||
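// For example, with a latency factor of 2, a scaled value of 5 becomes
// (5 + 2 - 1) / 2 == 3 cycles, i.e. the division rounds up.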
unsigned getCycles(unsigned Scaled) {
|
||||
unsigned Factor = SchedModel.getLatencyFactor();
|
||||
return (Scaled + Factor - 1) / Factor;
|
||||
}
|
||||
};
|
||||
|
||||
inline raw_ostream &operator<<(raw_ostream &OS,
|
||||
const MachineTraceMetrics::Trace &Tr) {
|
||||
Tr.print(OS);
|
||||
return OS;
|
||||
}
|
||||
|
||||
inline raw_ostream &operator<<(raw_ostream &OS,
|
||||
const MachineTraceMetrics::Ensemble &En) {
|
||||
En.print(OS);
|
||||
return OS;
|
||||
}
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
466
thirdparty/clang/include/llvm/CodeGen/PBQP/Graph.h
vendored
Normal file
@@ -0,0 +1,466 @@
|
||||
//===-------------------- Graph.h - PBQP Graph ------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// PBQP Graph class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
|
||||
#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
|
||||
#define LLVM_CODEGEN_PBQP_GRAPH_H
|
||||
|
||||
#include "Math.h"
|
||||
#include "llvm/ADT/ilist.h"
|
||||
#include "llvm/ADT/ilist_node.h"
|
||||
#include <list>
|
||||
#include <map>
|
||||
|
||||
namespace PBQP {
|
||||
|
||||
/// PBQP Graph class.
|
||||
/// Instances of this class describe PBQP problems.
|
||||
class Graph {
|
||||
private:
|
||||
|
||||
// ----- TYPEDEFS -----
|
||||
class NodeEntry;
|
||||
class EdgeEntry;
|
||||
|
||||
typedef llvm::ilist<NodeEntry> NodeList;
|
||||
typedef llvm::ilist<EdgeEntry> EdgeList;
|
||||
|
||||
public:
|
||||
|
||||
typedef NodeEntry* NodeItr;
|
||||
typedef const NodeEntry* ConstNodeItr;
|
||||
|
||||
typedef EdgeEntry* EdgeItr;
|
||||
typedef const EdgeEntry* ConstEdgeItr;
|
||||
|
||||
private:
|
||||
|
||||
typedef std::list<EdgeItr> AdjEdgeList;
|
||||
|
||||
public:
|
||||
|
||||
typedef AdjEdgeList::iterator AdjEdgeItr;
|
||||
|
||||
private:
|
||||
|
||||
class NodeEntry : public llvm::ilist_node<NodeEntry> {
|
||||
friend struct llvm::ilist_sentinel_traits<NodeEntry>;
|
||||
private:
|
||||
Vector costs;
|
||||
AdjEdgeList adjEdges;
|
||||
unsigned degree;
|
||||
void *data;
|
||||
NodeEntry() : costs(0, 0) {}
|
||||
public:
|
||||
NodeEntry(const Vector &costs) : costs(costs), degree(0) {}
|
||||
Vector& getCosts() { return costs; }
|
||||
const Vector& getCosts() const { return costs; }
|
||||
unsigned getDegree() const { return degree; }
|
||||
AdjEdgeItr edgesBegin() { return adjEdges.begin(); }
|
||||
AdjEdgeItr edgesEnd() { return adjEdges.end(); }
|
||||
AdjEdgeItr addEdge(EdgeItr e) {
|
||||
++degree;
|
||||
return adjEdges.insert(adjEdges.end(), e);
|
||||
}
|
||||
void removeEdge(AdjEdgeItr ae) {
|
||||
--degree;
|
||||
adjEdges.erase(ae);
|
||||
}
|
||||
void setData(void *data) { this->data = data; }
|
||||
void* getData() { return data; }
|
||||
};
|
||||
|
||||
class EdgeEntry : public llvm::ilist_node<EdgeEntry> {
|
||||
friend struct llvm::ilist_sentinel_traits<EdgeEntry>;
|
||||
private:
|
||||
NodeItr node1, node2;
|
||||
Matrix costs;
|
||||
AdjEdgeItr node1AEItr, node2AEItr;
|
||||
void *data;
|
||||
EdgeEntry() : costs(0, 0, 0) {}
|
||||
public:
|
||||
EdgeEntry(NodeItr node1, NodeItr node2, const Matrix &costs)
|
||||
: node1(node1), node2(node2), costs(costs) {}
|
||||
NodeItr getNode1() const { return node1; }
|
||||
NodeItr getNode2() const { return node2; }
|
||||
Matrix& getCosts() { return costs; }
|
||||
const Matrix& getCosts() const { return costs; }
|
||||
void setNode1AEItr(AdjEdgeItr ae) { node1AEItr = ae; }
|
||||
AdjEdgeItr getNode1AEItr() { return node1AEItr; }
|
||||
void setNode2AEItr(AdjEdgeItr ae) { node2AEItr = ae; }
|
||||
AdjEdgeItr getNode2AEItr() { return node2AEItr; }
|
||||
void setData(void *data) { this->data = data; }
|
||||
void *getData() { return data; }
|
||||
};
|
||||
|
||||
// ----- MEMBERS -----
|
||||
|
||||
NodeList nodes;
|
||||
unsigned numNodes;
|
||||
|
||||
EdgeList edges;
|
||||
unsigned numEdges;
|
||||
|
||||
// ----- INTERNAL METHODS -----
|
||||
|
||||
NodeEntry& getNode(NodeItr nItr) { return *nItr; }
|
||||
const NodeEntry& getNode(ConstNodeItr nItr) const { return *nItr; }
|
||||
|
||||
EdgeEntry& getEdge(EdgeItr eItr) { return *eItr; }
|
||||
const EdgeEntry& getEdge(ConstEdgeItr eItr) const { return *eItr; }
|
||||
|
||||
NodeItr addConstructedNode(const NodeEntry &n) {
|
||||
++numNodes;
|
||||
return nodes.insert(nodes.end(), n);
|
||||
}
|
||||
|
||||
EdgeItr addConstructedEdge(const EdgeEntry &e) {
|
||||
assert(findEdge(e.getNode1(), e.getNode2()) == edges.end() &&
|
||||
"Attempt to add duplicate edge.");
|
||||
++numEdges;
|
||||
EdgeItr edgeItr = edges.insert(edges.end(), e);
|
||||
EdgeEntry &ne = getEdge(edgeItr);
|
||||
NodeEntry &n1 = getNode(ne.getNode1());
|
||||
NodeEntry &n2 = getNode(ne.getNode2());
|
||||
// Sanity check on matrix dimensions:
|
||||
assert((n1.getCosts().getLength() == ne.getCosts().getRows()) &&
|
||||
(n2.getCosts().getLength() == ne.getCosts().getCols()) &&
|
||||
"Edge cost dimensions do not match node costs dimensions.");
|
||||
ne.setNode1AEItr(n1.addEdge(edgeItr));
|
||||
ne.setNode2AEItr(n2.addEdge(edgeItr));
|
||||
return edgeItr;
|
||||
}
|
||||
|
||||
inline void copyFrom(const Graph &other);
|
||||
public:
|
||||
|
||||
/// \brief Construct an empty PBQP graph.
|
||||
Graph() : numNodes(0), numEdges(0) {}
|
||||
|
||||
/// \brief Copy construct this graph from "other". Note: Does not copy node
|
||||
/// and edge data, only graph structure and costs.
|
||||
/// @param other Source graph to copy from.
|
||||
Graph(const Graph &other) : numNodes(0), numEdges(0) {
|
||||
copyFrom(other);
|
||||
}
|
||||
|
||||
/// \brief Make this graph a copy of "other". Note: Does not copy node and
|
||||
/// edge data, only graph structure and costs.
|
||||
/// @param other The graph to copy from.
|
||||
/// @return A reference to this graph.
|
||||
///
|
||||
/// This will clear the current graph, erasing any nodes and edges added,
|
||||
/// before copying from other.
|
||||
Graph& operator=(const Graph &other) {
|
||||
clear();
|
||||
copyFrom(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Add a node with the given costs.
|
||||
/// @param costs Cost vector for the new node.
|
||||
/// @return Node iterator for the added node.
|
||||
NodeItr addNode(const Vector &costs) {
|
||||
return addConstructedNode(NodeEntry(costs));
|
||||
}
|
||||
|
||||
/// \brief Add an edge between the given nodes with the given costs.
|
||||
/// @param n1Itr First node.
|
||||
/// @param n2Itr Second node.
|
||||
/// @return Edge iterator for the added edge.
|
||||
EdgeItr addEdge(Graph::NodeItr n1Itr, Graph::NodeItr n2Itr,
|
||||
const Matrix &costs) {
|
||||
assert(getNodeCosts(n1Itr).getLength() == costs.getRows() &&
|
||||
getNodeCosts(n2Itr).getLength() == costs.getCols() &&
|
||||
"Matrix dimensions mismatch.");
|
||||
return addConstructedEdge(EdgeEntry(n1Itr, n2Itr, costs));
|
||||
}
|
||||
|
||||
/// \brief Get the number of nodes in the graph.
|
||||
/// @return Number of nodes in the graph.
|
||||
unsigned getNumNodes() const { return numNodes; }
|
||||
|
||||
/// \brief Get the number of edges in the graph.
|
||||
/// @return Number of edges in the graph.
|
||||
unsigned getNumEdges() const { return numEdges; }
|
||||
|
||||
/// \brief Get a node's cost vector.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return Node cost vector.
|
||||
Vector& getNodeCosts(NodeItr nItr) { return getNode(nItr).getCosts(); }
|
||||
|
||||
/// \brief Get a node's cost vector (const version).
|
||||
/// @param nItr Node iterator.
|
||||
/// @return Node cost vector.
|
||||
const Vector& getNodeCosts(ConstNodeItr nItr) const {
|
||||
return getNode(nItr).getCosts();
|
||||
}
|
||||
|
||||
/// \brief Set a node's data pointer.
|
||||
/// @param nItr Node iterator.
|
||||
/// @param data Pointer to node data.
|
||||
///
|
||||
/// Typically used by a PBQP solver to attach data to aid in solution.
|
||||
void setNodeData(NodeItr nItr, void *data) { getNode(nItr).setData(data); }
|
||||
|
||||
/// \brief Get the node's data pointer.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return Pointer to node data.
|
||||
void* getNodeData(NodeItr nItr) { return getNode(nItr).getData(); }
|
||||
|
||||
/// \brief Get an edge's cost matrix.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @return Edge cost matrix.
|
||||
Matrix& getEdgeCosts(EdgeItr eItr) { return getEdge(eItr).getCosts(); }
|
||||
|
||||
/// \brief Get an edge's cost matrix (const version).
|
||||
/// @param eItr Edge iterator.
|
||||
/// @return Edge cost matrix.
|
||||
const Matrix& getEdgeCosts(ConstEdgeItr eItr) const {
|
||||
return getEdge(eItr).getCosts();
|
||||
}
|
||||
|
||||
/// \brief Set an edge's data pointer.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @param data Pointer to edge data.
|
||||
///
|
||||
/// Typically used by a PBQP solver to attach data to aid in solution.
|
||||
void setEdgeData(EdgeItr eItr, void *data) { getEdge(eItr).setData(data); }
|
||||
|
||||
/// \brief Get an edge's data pointer.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @return Pointer to edge data.
|
||||
void* getEdgeData(EdgeItr eItr) { return getEdge(eItr).getData(); }
|
||||
|
||||
/// \brief Get a node's degree.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return The degree of the node.
|
||||
unsigned getNodeDegree(NodeItr nItr) const {
|
||||
return getNode(nItr).getDegree();
|
||||
}
|
||||
|
||||
/// \brief Begin iterator for node set.
|
||||
NodeItr nodesBegin() { return nodes.begin(); }
|
||||
|
||||
/// \brief Begin const iterator for node set.
|
||||
ConstNodeItr nodesBegin() const { return nodes.begin(); }
|
||||
|
||||
/// \brief End iterator for node set.
|
||||
NodeItr nodesEnd() { return nodes.end(); }
|
||||
|
||||
/// \brief End const iterator for node set.
|
||||
ConstNodeItr nodesEnd() const { return nodes.end(); }
|
||||
|
||||
/// \brief Begin iterator for edge set.
|
||||
EdgeItr edgesBegin() { return edges.begin(); }
|
||||
|
||||
/// \brief End iterator for edge set.
|
||||
EdgeItr edgesEnd() { return edges.end(); }
|
||||
|
||||
/// \brief Get begin iterator for adjacent edge set.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return Begin iterator for the set of edges connected to the given node.
|
||||
AdjEdgeItr adjEdgesBegin(NodeItr nItr) {
|
||||
return getNode(nItr).edgesBegin();
|
||||
}
|
||||
|
||||
/// \brief Get end iterator for adjacent edge set.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return End iterator for the set of edges connected to the given node.
|
||||
AdjEdgeItr adjEdgesEnd(NodeItr nItr) {
|
||||
return getNode(nItr).edgesEnd();
|
||||
}
|
||||
|
||||
/// \brief Get the first node connected to this edge.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @return The first node connected to the given edge.
|
||||
NodeItr getEdgeNode1(EdgeItr eItr) {
|
||||
return getEdge(eItr).getNode1();
|
||||
}
|
||||
|
||||
/// \brief Get the second node connected to this edge.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @return The second node connected to the given edge.
|
||||
NodeItr getEdgeNode2(EdgeItr eItr) {
|
||||
return getEdge(eItr).getNode2();
|
||||
}
|
||||
|
||||
/// \brief Get the "other" node connected to this edge.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @param nItr Node iterator for the "given" node.
|
||||
/// @return The iterator for the "other" node connected to this edge.
|
||||
NodeItr getEdgeOtherNode(EdgeItr eItr, NodeItr nItr) {
|
||||
EdgeEntry &e = getEdge(eItr);
|
||||
if (e.getNode1() == nItr) {
|
||||
return e.getNode2();
|
||||
} // else
|
||||
return e.getNode1();
|
||||
}
|
||||
|
||||
/// \brief Get the edge connecting two nodes.
|
||||
/// @param n1Itr First node iterator.
|
||||
/// @param n2Itr Second node iterator.
|
||||
/// @return An iterator for edge (n1Itr, n2Itr) if such an edge exists,
|
||||
/// otherwise returns edgesEnd().
|
||||
EdgeItr findEdge(NodeItr n1Itr, NodeItr n2Itr) {
|
||||
for (AdjEdgeItr aeItr = adjEdgesBegin(n1Itr), aeEnd = adjEdgesEnd(n1Itr);
|
||||
aeItr != aeEnd; ++aeItr) {
|
||||
if ((getEdgeNode1(*aeItr) == n2Itr) ||
|
||||
(getEdgeNode2(*aeItr) == n2Itr)) {
|
||||
return *aeItr;
|
||||
}
|
||||
}
|
||||
return edges.end();
|
||||
}
|
||||
|
||||
/// \brief Remove a node from the graph.
|
||||
/// @param nItr Node iterator.
|
||||
void removeNode(NodeItr nItr) {
|
||||
NodeEntry &n = getNode(nItr);
|
||||
for (AdjEdgeItr itr = n.edgesBegin(), end = n.edgesEnd(); itr != end;) {
|
||||
EdgeItr eItr = *itr;
|
||||
++itr;
|
||||
removeEdge(eItr);
|
||||
}
|
||||
nodes.erase(nItr);
|
||||
--numNodes;
|
||||
}
|
||||
|
||||
/// \brief Remove an edge from the graph.
|
||||
/// @param eItr Edge iterator.
|
||||
void removeEdge(EdgeItr eItr) {
|
||||
EdgeEntry &e = getEdge(eItr);
|
||||
NodeEntry &n1 = getNode(e.getNode1());
|
||||
NodeEntry &n2 = getNode(e.getNode2());
|
||||
n1.removeEdge(e.getNode1AEItr());
|
||||
n2.removeEdge(e.getNode2AEItr());
|
||||
edges.erase(eItr);
|
||||
--numEdges;
|
||||
}
|
||||
|
||||
/// \brief Remove all nodes and edges from the graph.
|
||||
void clear() {
|
||||
nodes.clear();
|
||||
edges.clear();
|
||||
numNodes = numEdges = 0;
|
||||
}
|
||||
|
||||
/// \brief Dump a graph to an output stream.
|
||||
template <typename OStream>
|
||||
void dump(OStream &os) {
|
||||
os << getNumNodes() << " " << getNumEdges() << "\n";
|
||||
|
||||
for (NodeItr nodeItr = nodesBegin(), nodeEnd = nodesEnd();
|
||||
nodeItr != nodeEnd; ++nodeItr) {
|
||||
const Vector& v = getNodeCosts(nodeItr);
|
||||
os << "\n" << v.getLength() << "\n";
|
||||
assert(v.getLength() != 0 && "Empty vector in graph.");
|
||||
os << v[0];
|
||||
for (unsigned i = 1; i < v.getLength(); ++i) {
|
||||
os << " " << v[i];
|
||||
}
|
||||
os << "\n";
|
||||
}
|
||||
|
||||
for (EdgeItr edgeItr = edgesBegin(), edgeEnd = edgesEnd();
|
||||
edgeItr != edgeEnd; ++edgeItr) {
|
||||
unsigned n1 = std::distance(nodesBegin(), getEdgeNode1(edgeItr));
|
||||
unsigned n2 = std::distance(nodesBegin(), getEdgeNode2(edgeItr));
|
||||
assert(n1 != n2 && "PBQP graphs should not have self-edges.");
|
||||
const Matrix& m = getEdgeCosts(edgeItr);
|
||||
os << "\n" << n1 << " " << n2 << "\n"
|
||||
<< m.getRows() << " " << m.getCols() << "\n";
|
||||
assert(m.getRows() != 0 && "No rows in matrix.");
|
||||
assert(m.getCols() != 0 && "No cols in matrix.");
|
||||
for (unsigned i = 0; i < m.getRows(); ++i) {
|
||||
os << m[i][0];
|
||||
for (unsigned j = 1; j < m.getCols(); ++j) {
|
||||
os << " " << m[i][j];
|
||||
}
|
||||
os << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Print a representation of this graph in DOT format.
|
||||
/// @param os Output stream to print on.
|
||||
template <typename OStream>
|
||||
void printDot(OStream &os) {
|
||||
|
||||
os << "graph {\n";
|
||||
|
||||
for (NodeItr nodeItr = nodesBegin(), nodeEnd = nodesEnd();
|
||||
nodeItr != nodeEnd; ++nodeItr) {
|
||||
|
||||
os << " node" << nodeItr << " [ label=\""
|
||||
<< nodeItr << ": " << getNodeCosts(nodeItr) << "\" ]\n";
|
||||
}
|
||||
|
||||
os << " edge [ len=" << getNumNodes() << " ]\n";
|
||||
|
||||
for (EdgeItr edgeItr = edgesBegin(), edgeEnd = edgesEnd();
|
||||
edgeItr != edgeEnd; ++edgeItr) {
|
||||
|
||||
os << " node" << getEdgeNode1(edgeItr)
|
||||
<< " -- node" << getEdgeNode2(edgeItr)
|
||||
<< " [ label=\"";
|
||||
|
||||
const Matrix &edgeCosts = getEdgeCosts(edgeItr);
|
||||
|
||||
for (unsigned i = 0; i < edgeCosts.getRows(); ++i) {
|
||||
os << edgeCosts.getRowAsVector(i) << "\\n";
|
||||
}
|
||||
os << "\" ]\n";
|
||||
}
|
||||
os << "}\n";
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
class NodeItrComparator {
|
||||
public:
|
||||
bool operator()(Graph::NodeItr n1, Graph::NodeItr n2) const {
|
||||
return &*n1 < &*n2;
|
||||
}
|
||||
|
||||
bool operator()(Graph::ConstNodeItr n1, Graph::ConstNodeItr n2) const {
|
||||
return &*n1 < &*n2;
|
||||
}
|
||||
};
|
||||
|
||||
class EdgeItrCompartor {
|
||||
public:
|
||||
bool operator()(Graph::EdgeItr e1, Graph::EdgeItr e2) const {
|
||||
return &*e1 < &*e2;
|
||||
}
|
||||
|
||||
bool operator()(Graph::ConstEdgeItr e1, Graph::ConstEdgeItr e2) const {
|
||||
return &*e1 < &*e2;
|
||||
}
|
||||
};
|
||||
|
||||
void Graph::copyFrom(const Graph &other) {
|
||||
std::map<Graph::ConstNodeItr, Graph::NodeItr,
|
||||
NodeItrComparator> nodeMap;
|
||||
|
||||
for (Graph::ConstNodeItr nItr = other.nodesBegin(),
|
||||
nEnd = other.nodesEnd();
|
||||
nItr != nEnd; ++nItr) {
|
||||
nodeMap[nItr] = addNode(other.getNodeCosts(nItr));
|
||||
  }

  // Duplicate the edges so that the copy carries the full graph structure and
  // costs, as documented for the copy constructor and operator=. A const_cast
  // is required because this class exposes no const edge iteration.
  Graph &otherMutable = const_cast<Graph&>(other);
  for (EdgeItr eItr = otherMutable.edgesBegin(),
               eEnd = otherMutable.edgesEnd();
       eItr != eEnd; ++eItr) {
    addEdge(nodeMap[otherMutable.getEdgeNode1(eItr)],
            nodeMap[otherMutable.getEdgeNode2(eItr)],
            otherMutable.getEdgeCosts(eItr));
  }
}
|
||||
|
||||
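/// \brief Hypothetical usage sketch, not part of the original header: build a
/// minimal two-node, one-edge problem. The Vector(length, value) and
/// Matrix(rows, cols, value) constructors are assumed to be provided by
/// Math.h.
inline void buildTrivialProblem(Graph &g) {
  // Two nodes, each with two equally priced options.
  Graph::NodeItr n1 = g.addNode(Vector(2, 0));
  Graph::NodeItr n2 = g.addNode(Vector(2, 0));

  // Penalise selecting option 0 for both nodes at the same time.
  Matrix costs(2, 2, 0);
  costs[0][0] = 1;
  g.addEdge(n1, n2, costs);
}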
}
|
||||
|
||||
#endif // LLVM_CODEGEN_PBQP_GRAPH_H
|
||||
247
thirdparty/clang/include/llvm/CodeGen/PBQP/HeuristicBase.h
vendored
Normal file
@@ -0,0 +1,247 @@
|
||||
//===-- HeuristicBase.h --- Heuristic base class for PBQP -------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_PBQP_HEURISTICBASE_H
|
||||
#define LLVM_CODEGEN_PBQP_HEURISTICBASE_H
|
||||
|
||||
#include "HeuristicSolver.h"
|
||||
|
||||
namespace PBQP {
|
||||
|
||||
/// \brief Abstract base class for heuristic implementations.
|
||||
///
|
||||
/// This class provides a handy base for heuristic implementations with common
|
||||
/// solver behaviour implemented for a number of methods.
|
||||
///
|
||||
/// To implement your own heuristic using this class as a base you'll have to
|
||||
/// implement, as a minimum, the following methods:
|
||||
/// <ul>
|
||||
/// <li> void addToHeuristicList(Graph::NodeItr) : Add a node to the
|
||||
/// heuristic reduction list.
|
||||
/// <li> void heuristicReduce() : Perform a single heuristic reduction.
|
||||
/// <li> void preUpdateEdgeCosts(Graph::EdgeItr) : Handle the (imminent)
|
||||
/// change to the cost matrix on the given edge (by R2).
|
||||
/// <li> void postUpdateEdgeCosts(Graph::EdgeItr) : Handle the new
|
||||
/// costs on the given edge.
|
||||
/// <li> void handleAddEdge(Graph::EdgeItr) : Handle the addition of a new
|
||||
/// edge into the PBQP graph (by R2).
|
||||
/// <li> void handleRemoveEdge(Graph::EdgeItr, Graph::NodeItr) : Handle the
|
||||
/// disconnection of the given edge from the given node.
|
||||
/// <li> A constructor for your derived class : to pass back a reference to
|
||||
/// the solver which is using this heuristic.
|
||||
/// </ul>
|
||||
///
|
||||
/// These methods are implemented in this class for documentation purposes,
|
||||
/// but will assert if called.
|
||||
///
|
||||
/// Note that this class uses the curiously recursive template idiom to
|
||||
/// forward calls to the derived class. These methods need not be made
|
||||
/// virtual, and indeed probably shouldn't for performance reasons.
|
||||
///
|
||||
/// You'll also need to provide NodeData and EdgeData structs in your class.
|
||||
/// These can be used to attach data relevant to your heuristic to each
|
||||
/// node/edge in the PBQP graph.
|
||||
|
||||
template <typename HImpl>
|
||||
class HeuristicBase {
|
||||
private:
|
||||
|
||||
typedef std::list<Graph::NodeItr> OptimalList;
|
||||
|
||||
HeuristicSolverImpl<HImpl> &s;
|
||||
Graph &g;
|
||||
OptimalList optimalList;
|
||||
|
||||
// Return a reference to the derived heuristic.
|
||||
HImpl& impl() { return static_cast<HImpl&>(*this); }
|
||||
|
||||
// Add the given node to the optimal reductions list. Keep an iterator to
|
||||
// its location for fast removal.
|
||||
void addToOptimalReductionList(Graph::NodeItr nItr) {
|
||||
optimalList.insert(optimalList.end(), nItr);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
/// \brief Construct an instance with a reference to the given solver.
|
||||
/// @param solver The solver which is using this heuristic instance.
|
||||
HeuristicBase(HeuristicSolverImpl<HImpl> &solver)
|
||||
: s(solver), g(s.getGraph()) { }
|
||||
|
||||
/// \brief Get the solver which is using this heuristic instance.
|
||||
/// @return The solver which is using this heuristic instance.
|
||||
///
|
||||
/// You can use this method to get access to the solver in your derived
|
||||
/// heuristic implementation.
|
||||
HeuristicSolverImpl<HImpl>& getSolver() { return s; }
|
||||
|
||||
/// \brief Get the graph representing the problem to be solved.
|
||||
/// @return The graph representing the problem to be solved.
|
||||
Graph& getGraph() { return g; }
|
||||
|
||||
/// \brief Tell the solver to simplify the graph before the reduction phase.
|
||||
/// @return Whether or not the solver should run a simplification phase
|
||||
/// prior to the main setup and reduction.
|
||||
///
|
||||
/// HeuristicBase returns true from this method as it's a sensible default,
|
||||
/// however you can over-ride it in your derived class if you want different
|
||||
/// behaviour.
|
||||
bool solverRunSimplify() const { return true; }
|
||||
|
||||
/// \brief Decide whether a node should be optimally or heuristically
|
||||
/// reduced.
|
||||
/// @return Whether or not the given node should be listed for optimal
|
||||
/// reduction (via R0, R1 or R2).
|
||||
///
|
||||
/// HeuristicBase returns true for any node with degree less than 3. This is
|
||||
/// sane and sensible for many situations, but not all. You can over-ride
|
||||
/// this method in your derived class if you want a different selection
|
||||
/// criterion. Note however that your criteria for selecting optimal nodes
|
||||
/// should be <i>at least</i> as strong as this. I.e. Nodes of degree 3 or
|
||||
/// higher should not be selected under any circumstances.
|
||||
bool shouldOptimallyReduce(Graph::NodeItr nItr) {
|
||||
if (g.getNodeDegree(nItr) < 3)
|
||||
return true;
|
||||
// else
|
||||
return false;
|
||||
}
|
||||
|
||||
/// \brief Add the given node to the list of nodes to be optimally reduced.
|
||||
/// @param nItr Node iterator to be added.
|
||||
///
|
||||
/// You probably don't want to over-ride this, except perhaps to record
|
||||
/// statistics before calling this implementation. HeuristicBase relies on
|
||||
/// its behaviour.
|
||||
void addToOptimalReduceList(Graph::NodeItr nItr) {
|
||||
optimalList.push_back(nItr);
|
||||
}
|
||||
|
||||
/// \brief Initialise the heuristic.
|
||||
///
|
||||
/// HeuristicBase iterates over all nodes in the problem and adds them to
|
||||
/// the appropriate list using addToOptimalReduceList or
|
||||
/// addToHeuristicReduceList based on the result of shouldOptimallyReduce.
|
||||
///
|
||||
/// This behaviour should be fine for most situations.
|
||||
void setup() {
|
||||
for (Graph::NodeItr nItr = g.nodesBegin(), nEnd = g.nodesEnd();
|
||||
nItr != nEnd; ++nItr) {
|
||||
if (impl().shouldOptimallyReduce(nItr)) {
|
||||
addToOptimalReduceList(nItr);
|
||||
} else {
|
||||
impl().addToHeuristicReduceList(nItr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Optimally reduce one of the nodes in the optimal reduce list.
|
||||
/// @return True if a reduction takes place, false if the optimal reduce
|
||||
/// list is empty.
|
||||
///
|
||||
/// Selects a node from the optimal reduce list and removes it, applying
|
||||
/// R0, R1 or R2 as appropriate based on the selected node's degree.
|
||||
bool optimalReduce() {
|
||||
if (optimalList.empty())
|
||||
return false;
|
||||
|
||||
Graph::NodeItr nItr = optimalList.front();
|
||||
optimalList.pop_front();
|
||||
|
||||
switch (s.getSolverDegree(nItr)) {
|
||||
case 0: s.applyR0(nItr); break;
|
||||
case 1: s.applyR1(nItr); break;
|
||||
case 2: s.applyR2(nItr); break;
|
||||
default: llvm_unreachable(
|
||||
"Optimal reductions of degree > 2 nodes is invalid.");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// \brief Perform the PBQP reduction process.
|
||||
///
|
||||
/// Reduces the problem to the empty graph by repeated application of the
|
||||
/// reduction rules R0, R1, R2 and RN.
|
||||
/// R0, R1 or R2 are always applied if possible before RN is used.
|
||||
void reduce() {
|
||||
bool finished = false;
|
||||
|
||||
while (!finished) {
|
||||
if (!optimalReduce()) {
|
||||
if (impl().heuristicReduce()) {
|
||||
getSolver().recordRN();
|
||||
} else {
|
||||
finished = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Add a node to the heuristic reduce list.
|
||||
/// @param nItr Node iterator to add to the heuristic reduce list.
|
||||
void addToHeuristicList(Graph::NodeItr nItr) {
|
||||
llvm_unreachable("Must be implemented in derived class.");
|
||||
}
|
||||
|
||||
/// \brief Heuristically reduce one of the nodes in the heuristic
|
||||
/// reduce list.
|
||||
/// @return True if a reduction takes place, false if the heuristic reduce
|
||||
/// list is empty.
|
||||
bool heuristicReduce() {
|
||||
llvm_unreachable("Must be implemented in derived class.");
|
||||
return false;
|
||||
}
|
||||
|
||||
/// \brief Prepare a change in the costs on the given edge.
|
||||
/// @param eItr Edge iterator.
|
||||
void preUpdateEdgeCosts(Graph::EdgeItr eItr) {
|
||||
llvm_unreachable("Must be implemented in derived class.");
|
||||
}
|
||||
|
||||
/// \brief Handle the change in the costs on the given edge.
|
||||
/// @param eItr Edge iterator.
|
||||
void postUpdateEdgeCosts(Graph::EdgeItr eItr) {
|
||||
llvm_unreachable("Must be implemented in derived class.");
|
||||
}
|
||||
|
||||
/// \brief Handle the addition of a new edge into the PBQP graph.
|
||||
/// @param eItr Edge iterator for the added edge.
|
||||
void handleAddEdge(Graph::EdgeItr eItr) {
|
||||
llvm_unreachable("Must be implemented in derived class.");
|
||||
}
|
||||
|
||||
/// \brief Handle disconnection of an edge from a node.
|
||||
/// @param eItr Edge iterator for edge being disconnected.
|
||||
/// @param nItr Node iterator for the node being disconnected from.
|
||||
///
|
||||
/// Edges are frequently removed due to the removal of a node. This
|
||||
/// method allows for the effect to be computed only for the remaining
|
||||
/// node in the graph.
|
||||
void handleRemoveEdge(Graph::EdgeItr eItr, Graph::NodeItr nItr) {
|
||||
llvm_unreachable("Must be implemented in derived class.");
|
||||
}
|
||||
|
||||
/// \brief Clean up any structures used by HeuristicBase.
|
||||
///
|
||||
/// At present this just performs a sanity check: that the optimal reduce
|
||||
/// list is empty now that reduction has completed.
|
||||
///
|
||||
/// If your derived class has more complex structures which need tearing
|
||||
/// down you should over-ride this method but include a call back to this
|
||||
/// implementation.
|
||||
void cleanup() {
|
||||
assert(optimalList.empty() && "Nodes left over in optimal reduce list?");
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
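/// \brief Hypothetical skeleton (illustration only; the real in-tree example
/// is the Briggs heuristic in Heuristics/Briggs.h) showing the minimum a
/// derived class must provide. HeuristicBase::setup() forwards high-degree
/// nodes to addToHeuristicReduceList(), and the solver calls
/// postUpdateEdgeCosts(), handleAddEdge() and handleRemoveEdge() through the
/// derived type. This skeleton compiles but is not a usable heuristic: nodes
/// of degree > 2 are simply left unreduced.
class NullHeuristic : public HeuristicBase<NullHeuristic> {
public:
  struct NodeData {};
  struct EdgeData {};

  NullHeuristic(HeuristicSolverImpl<NullHeuristic> &solver)
    : HeuristicBase<NullHeuristic>(solver) {}

  void addToHeuristicReduceList(Graph::NodeItr) {}
  bool heuristicReduce() { return false; }
  void preUpdateEdgeCosts(Graph::EdgeItr) {}
  void postUpdateEdgeCosts(Graph::EdgeItr) {}
  void handleAddEdge(Graph::EdgeItr) {}
  void handleRemoveEdge(Graph::EdgeItr, Graph::NodeItr) {}
};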
}
|
||||
|
||||
|
||||
#endif // LLVM_CODEGEN_PBQP_HEURISTICBASE_H
|
||||
616
thirdparty/clang/include/llvm/CodeGen/PBQP/HeuristicSolver.h
vendored
Normal file
@@ -0,0 +1,616 @@
|
||||
//===-- HeuristicSolver.h - Heuristic PBQP Solver --------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Heuristic PBQP solver. This solver is able to perform optimal reductions for
|
||||
// nodes of degree 0, 1 or 2. For nodes of degree >2 a pluggable heuristic is
|
||||
// used to select a node for reduction.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_PBQP_HEURISTICSOLVER_H
|
||||
#define LLVM_CODEGEN_PBQP_HEURISTICSOLVER_H
|
||||
|
||||
#include "Graph.h"
|
||||
#include "Solution.h"
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
|
||||
namespace PBQP {
|
||||
|
||||
/// \brief Heuristic PBQP solver implementation.
|
||||
///
|
||||
/// This class should usually be created (and destroyed) indirectly via a call
|
||||
/// to HeuristicSolver<HImpl>::solve(Graph&).
|
||||
/// See the comments for HeuristicSolver.
|
||||
///
|
||||
/// HeuristicSolverImpl provides the R0, R1 and R2 reduction rules,
|
||||
/// backpropagation phase, and maintains the internal copy of the graph on
|
||||
/// which the reduction is carried out (the original being kept to facilitate
|
||||
/// backpropagation).
|
||||
template <typename HImpl>
|
||||
class HeuristicSolverImpl {
|
||||
private:
|
||||
|
||||
typedef typename HImpl::NodeData HeuristicNodeData;
|
||||
typedef typename HImpl::EdgeData HeuristicEdgeData;
|
||||
|
||||
typedef std::list<Graph::EdgeItr> SolverEdges;
|
||||
|
||||
public:
|
||||
|
||||
/// \brief Iterator type for edges in the solver graph.
|
||||
typedef SolverEdges::iterator SolverEdgeItr;
|
||||
|
||||
private:
|
||||
|
||||
class NodeData {
|
||||
public:
|
||||
NodeData() : solverDegree(0) {}
|
||||
|
||||
HeuristicNodeData& getHeuristicData() { return hData; }
|
||||
|
||||
SolverEdgeItr addSolverEdge(Graph::EdgeItr eItr) {
|
||||
++solverDegree;
|
||||
return solverEdges.insert(solverEdges.end(), eItr);
|
||||
}
|
||||
|
||||
void removeSolverEdge(SolverEdgeItr seItr) {
|
||||
--solverDegree;
|
||||
solverEdges.erase(seItr);
|
||||
}
|
||||
|
||||
SolverEdgeItr solverEdgesBegin() { return solverEdges.begin(); }
|
||||
SolverEdgeItr solverEdgesEnd() { return solverEdges.end(); }
|
||||
unsigned getSolverDegree() const { return solverDegree; }
|
||||
void clearSolverEdges() {
|
||||
solverDegree = 0;
|
||||
solverEdges.clear();
|
||||
}
|
||||
|
||||
private:
|
||||
HeuristicNodeData hData;
|
||||
unsigned solverDegree;
|
||||
SolverEdges solverEdges;
|
||||
};
|
||||
|
||||
class EdgeData {
|
||||
public:
|
||||
HeuristicEdgeData& getHeuristicData() { return hData; }
|
||||
|
||||
void setN1SolverEdgeItr(SolverEdgeItr n1SolverEdgeItr) {
|
||||
this->n1SolverEdgeItr = n1SolverEdgeItr;
|
||||
}
|
||||
|
||||
SolverEdgeItr getN1SolverEdgeItr() { return n1SolverEdgeItr; }
|
||||
|
||||
void setN2SolverEdgeItr(SolverEdgeItr n2SolverEdgeItr){
|
||||
this->n2SolverEdgeItr = n2SolverEdgeItr;
|
||||
}
|
||||
|
||||
SolverEdgeItr getN2SolverEdgeItr() { return n2SolverEdgeItr; }
|
||||
|
||||
private:
|
||||
|
||||
HeuristicEdgeData hData;
|
||||
SolverEdgeItr n1SolverEdgeItr, n2SolverEdgeItr;
|
||||
};
|
||||
|
||||
Graph &g;
|
||||
HImpl h;
|
||||
Solution s;
|
||||
std::vector<Graph::NodeItr> stack;
|
||||
|
||||
typedef std::list<NodeData> NodeDataList;
|
||||
NodeDataList nodeDataList;
|
||||
|
||||
typedef std::list<EdgeData> EdgeDataList;
|
||||
EdgeDataList edgeDataList;
|
||||
|
||||
public:
|
||||
|
||||
/// \brief Construct a heuristic solver implementation to solve the given
|
||||
/// graph.
|
||||
/// @param g The graph representing the problem instance to be solved.
|
||||
HeuristicSolverImpl(Graph &g) : g(g), h(*this) {}
|
||||
|
||||
/// \brief Get the graph being solved by this solver.
|
||||
/// @return The graph representing the problem instance being solved by this
|
||||
/// solver.
|
||||
Graph& getGraph() { return g; }
|
||||
|
||||
/// \brief Get the heuristic data attached to the given node.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return The heuristic data attached to the given node.
|
||||
HeuristicNodeData& getHeuristicNodeData(Graph::NodeItr nItr) {
|
||||
return getSolverNodeData(nItr).getHeuristicData();
|
||||
}
|
||||
|
||||
/// \brief Get the heuristic data attached to the given edge.
|
||||
/// @param eItr Edge iterator.
|
||||
/// @return The heuristic data attached to the given edge.
|
||||
HeuristicEdgeData& getHeuristicEdgeData(Graph::EdgeItr eItr) {
|
||||
return getSolverEdgeData(eItr).getHeuristicData();
|
||||
}
|
||||
|
||||
/// \brief Begin iterator for the set of edges adjacent to the given node in
|
||||
/// the solver graph.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return Begin iterator for the set of edges adjacent to the given node
|
||||
/// in the solver graph.
|
||||
SolverEdgeItr solverEdgesBegin(Graph::NodeItr nItr) {
|
||||
return getSolverNodeData(nItr).solverEdgesBegin();
|
||||
}
|
||||
|
||||
/// \brief End iterator for the set of edges adjacent to the given node in
|
||||
/// the solver graph.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return End iterator for the set of edges adjacent to the given node in
|
||||
/// the solver graph.
|
||||
SolverEdgeItr solverEdgesEnd(Graph::NodeItr nItr) {
|
||||
return getSolverNodeData(nItr).solverEdgesEnd();
|
||||
}
|
||||
|
||||
/// \brief Remove an edge from the solver graph.
|
||||
/// @param eItr Edge iterator for edge to be removed.
|
||||
///
|
||||
/// Does <i>not</i> notify the heuristic of the removal. That should be
|
||||
/// done manually if necessary.
|
||||
void removeSolverEdge(Graph::EdgeItr eItr) {
|
||||
EdgeData &eData = getSolverEdgeData(eItr);
|
||||
NodeData &n1Data = getSolverNodeData(g.getEdgeNode1(eItr)),
|
||||
&n2Data = getSolverNodeData(g.getEdgeNode2(eItr));
|
||||
|
||||
n1Data.removeSolverEdge(eData.getN1SolverEdgeItr());
|
||||
n2Data.removeSolverEdge(eData.getN2SolverEdgeItr());
|
||||
}
|
||||
|
||||
/// \brief Compute a solution to the PBQP problem instance with which this
|
||||
/// heuristic solver was constructed.
|
||||
/// @return A solution to the PBQP problem.
|
||||
///
|
||||
/// Performs the full PBQP heuristic solver algorithm, including setup,
|
||||
/// calls to the heuristic (which will call back to the reduction rules in
|
||||
/// this class), and cleanup.
|
||||
Solution computeSolution() {
|
||||
setup();
|
||||
h.setup();
|
||||
h.reduce();
|
||||
backpropagate();
|
||||
h.cleanup();
|
||||
cleanup();
|
||||
return s;
|
||||
}
|
||||
|
||||
/// \brief Add to the end of the stack.
|
||||
/// @param nItr Node iterator to add to the reduction stack.
|
||||
void pushToStack(Graph::NodeItr nItr) {
|
||||
getSolverNodeData(nItr).clearSolverEdges();
|
||||
stack.push_back(nItr);
|
||||
}
|
||||
|
||||
/// \brief Returns the solver degree of the given node.
|
||||
/// @param nItr Node iterator for which degree is requested.
|
||||
/// @return Node degree in the <i>solver</i> graph (not the original graph).
|
||||
unsigned getSolverDegree(Graph::NodeItr nItr) {
|
||||
return getSolverNodeData(nItr).getSolverDegree();
|
||||
}
|
||||
|
||||
/// \brief Set the solution of the given node.
|
||||
/// @param nItr Node iterator to set solution for.
|
||||
/// @param selection Selection for node.
|
||||
void setSolution(const Graph::NodeItr &nItr, unsigned selection) {
|
||||
s.setSelection(nItr, selection);
|
||||
|
||||
for (Graph::AdjEdgeItr aeItr = g.adjEdgesBegin(nItr),
|
||||
aeEnd = g.adjEdgesEnd(nItr);
|
||||
aeItr != aeEnd; ++aeItr) {
|
||||
Graph::EdgeItr eItr(*aeItr);
|
||||
Graph::NodeItr anItr(g.getEdgeOtherNode(eItr, nItr));
|
||||
getSolverNodeData(anItr).addSolverEdge(eItr);
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Apply rule R0.
|
||||
/// @param nItr Node iterator for node to apply R0 to.
|
||||
///
|
||||
/// Node will be automatically pushed to the solver stack.
|
||||
void applyR0(Graph::NodeItr nItr) {
|
||||
assert(getSolverNodeData(nItr).getSolverDegree() == 0 &&
|
||||
"R0 applied to node with degree != 0.");
|
||||
|
||||
// Nothing to do. Just push the node onto the reduction stack.
|
||||
pushToStack(nItr);
|
||||
|
||||
s.recordR0();
|
||||
}
|
||||
|
||||
/// \brief Apply rule R1.
|
||||
/// @param xnItr Node iterator for node to apply R1 to.
|
||||
///
|
||||
/// Node will be automatically pushed to the solver stack.
|
||||
void applyR1(Graph::NodeItr xnItr) {
|
||||
NodeData &nd = getSolverNodeData(xnItr);
|
||||
assert(nd.getSolverDegree() == 1 &&
|
||||
"R1 applied to node with degree != 1.");
|
||||
|
||||
Graph::EdgeItr eItr = *nd.solverEdgesBegin();
|
||||
|
||||
const Matrix &eCosts = g.getEdgeCosts(eItr);
|
||||
const Vector &xCosts = g.getNodeCosts(xnItr);
|
||||
|
||||
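// In both branches below the edge is folded into the remaining node y:
//   yCosts[j] += min over i of (eCosts[i][j] + xCosts[i])
// (with the roles of i and j swapped when x is the edge's second node).
// Afterwards x has solver degree 0 and is pushed to the stack so its own
// selection can be recovered during backpropagation.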
// Duplicate a little to avoid transposing matrices.
|
||||
if (xnItr == g.getEdgeNode1(eItr)) {
|
||||
Graph::NodeItr ynItr = g.getEdgeNode2(eItr);
|
||||
Vector &yCosts = g.getNodeCosts(ynItr);
|
||||
for (unsigned j = 0; j < yCosts.getLength(); ++j) {
|
||||
PBQPNum min = eCosts[0][j] + xCosts[0];
|
||||
for (unsigned i = 1; i < xCosts.getLength(); ++i) {
|
||||
PBQPNum c = eCosts[i][j] + xCosts[i];
|
||||
if (c < min)
|
||||
min = c;
|
||||
}
|
||||
yCosts[j] += min;
|
||||
}
|
||||
h.handleRemoveEdge(eItr, ynItr);
|
||||
} else {
|
||||
Graph::NodeItr ynItr = g.getEdgeNode1(eItr);
|
||||
Vector &yCosts = g.getNodeCosts(ynItr);
|
||||
for (unsigned i = 0; i < yCosts.getLength(); ++i) {
|
||||
PBQPNum min = eCosts[i][0] + xCosts[0];
|
||||
for (unsigned j = 1; j < xCosts.getLength(); ++j) {
|
||||
PBQPNum c = eCosts[i][j] + xCosts[j];
|
||||
if (c < min)
|
||||
min = c;
|
||||
}
|
||||
yCosts[i] += min;
|
||||
}
|
||||
h.handleRemoveEdge(eItr, ynItr);
|
||||
}
|
||||
removeSolverEdge(eItr);
|
||||
assert(nd.getSolverDegree() == 0 &&
|
||||
"Degree 1 with edge removed should be 0.");
|
||||
pushToStack(xnItr);
|
||||
s.recordR1();
|
||||
}
|
||||
|
||||
/// \brief Apply rule R2.
|
||||
/// @param xnItr Node iterator for node to apply R2 to.
|
||||
///
|
||||
/// Node will be automatically pushed to the solver stack.
|
||||
void applyR2(Graph::NodeItr xnItr) {
|
||||
assert(getSolverNodeData(xnItr).getSolverDegree() == 2 &&
|
||||
"R2 applied to node with degree != 2.");
|
||||
|
||||
NodeData &nd = getSolverNodeData(xnItr);
|
||||
const Vector &xCosts = g.getNodeCosts(xnItr);
|
||||
|
||||
SolverEdgeItr aeItr = nd.solverEdgesBegin();
|
||||
Graph::EdgeItr yxeItr = *aeItr,
|
||||
zxeItr = *(++aeItr);
|
||||
|
||||
Graph::NodeItr ynItr = g.getEdgeOtherNode(yxeItr, xnItr),
|
||||
znItr = g.getEdgeOtherNode(zxeItr, xnItr);
|
||||
|
||||
bool flipEdge1 = (g.getEdgeNode1(yxeItr) == xnItr),
|
||||
flipEdge2 = (g.getEdgeNode1(zxeItr) == xnItr);
|
||||
|
||||
const Matrix *yxeCosts = flipEdge1 ?
|
||||
new Matrix(g.getEdgeCosts(yxeItr).transpose()) :
|
||||
&g.getEdgeCosts(yxeItr);
|
||||
|
||||
const Matrix *zxeCosts = flipEdge2 ?
|
||||
new Matrix(g.getEdgeCosts(zxeItr).transpose()) :
|
||||
&g.getEdgeCosts(zxeItr);
|
||||
|
||||
unsigned xLen = xCosts.getLength(),
|
||||
yLen = yxeCosts->getRows(),
|
||||
zLen = zxeCosts->getRows();
|
||||
|
||||
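// The two edges incident on x are replaced by a single y-z edge whose cost
// matrix is
//   delta[i][j] = min over k of (yx[i][k] + zx[j][k] + xCosts[k])
// where yx and zx are the edge matrices oriented with x along the columns.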
Matrix delta(yLen, zLen);
|
||||
|
||||
for (unsigned i = 0; i < yLen; ++i) {
|
||||
for (unsigned j = 0; j < zLen; ++j) {
|
||||
PBQPNum min = (*yxeCosts)[i][0] + (*zxeCosts)[j][0] + xCosts[0];
|
||||
for (unsigned k = 1; k < xLen; ++k) {
|
||||
PBQPNum c = (*yxeCosts)[i][k] + (*zxeCosts)[j][k] + xCosts[k];
|
||||
if (c < min) {
|
||||
min = c;
|
||||
}
|
||||
}
|
||||
delta[i][j] = min;
|
||||
}
|
||||
}
|
||||
|
||||
if (flipEdge1)
|
||||
delete yxeCosts;
|
||||
|
||||
if (flipEdge2)
|
||||
delete zxeCosts;
|
||||
|
||||
Graph::EdgeItr yzeItr = g.findEdge(ynItr, znItr);
|
||||
bool addedEdge = false;
|
||||
|
||||
if (yzeItr == g.edgesEnd()) {
|
||||
yzeItr = g.addEdge(ynItr, znItr, delta);
|
||||
addedEdge = true;
|
||||
} else {
|
||||
Matrix &yzeCosts = g.getEdgeCosts(yzeItr);
|
||||
h.preUpdateEdgeCosts(yzeItr);
|
||||
if (ynItr == g.getEdgeNode1(yzeItr)) {
|
||||
yzeCosts += delta;
|
||||
} else {
|
||||
yzeCosts += delta.transpose();
|
||||
}
|
||||
}
|
||||
|
||||
bool nullCostEdge = tryNormaliseEdgeMatrix(yzeItr);
|
||||
|
||||
if (!addedEdge) {
|
||||
// If we modified the edge costs let the heuristic know.
|
||||
h.postUpdateEdgeCosts(yzeItr);
|
||||
}
|
||||
|
||||
if (nullCostEdge) {
|
||||
// If this edge ended up null remove it.
|
||||
if (!addedEdge) {
|
||||
// We didn't just add it, so we need to notify the heuristic
|
||||
// and remove it from the solver.
|
||||
h.handleRemoveEdge(yzeItr, ynItr);
|
||||
h.handleRemoveEdge(yzeItr, znItr);
|
||||
removeSolverEdge(yzeItr);
|
||||
}
|
||||
g.removeEdge(yzeItr);
|
||||
} else if (addedEdge) {
|
||||
// If the edge was added, and non-null, finish setting it up, add it to
|
||||
// the solver & notify heuristic.
|
||||
edgeDataList.push_back(EdgeData());
|
||||
g.setEdgeData(yzeItr, &edgeDataList.back());
|
||||
addSolverEdge(yzeItr);
|
||||
h.handleAddEdge(yzeItr);
|
||||
}
|
||||
|
||||
h.handleRemoveEdge(yxeItr, ynItr);
|
||||
removeSolverEdge(yxeItr);
|
||||
h.handleRemoveEdge(zxeItr, znItr);
|
||||
removeSolverEdge(zxeItr);
|
||||
|
||||
pushToStack(xnItr);
|
||||
s.recordR2();
|
||||
}
|
||||
|
||||
/// \brief Record an application of the RN rule.
|
||||
///
|
||||
/// For use by the HeuristicBase.
|
||||
void recordRN() { s.recordRN(); }
|
||||
|
||||
private:
|
||||
|
||||
NodeData& getSolverNodeData(Graph::NodeItr nItr) {
|
||||
return *static_cast<NodeData*>(g.getNodeData(nItr));
|
||||
}
|
||||
|
||||
EdgeData& getSolverEdgeData(Graph::EdgeItr eItr) {
|
||||
return *static_cast<EdgeData*>(g.getEdgeData(eItr));
|
||||
}
|
||||
|
||||
void addSolverEdge(Graph::EdgeItr eItr) {
|
||||
EdgeData &eData = getSolverEdgeData(eItr);
|
||||
NodeData &n1Data = getSolverNodeData(g.getEdgeNode1(eItr)),
|
||||
&n2Data = getSolverNodeData(g.getEdgeNode2(eItr));
|
||||
|
||||
eData.setN1SolverEdgeItr(n1Data.addSolverEdge(eItr));
|
||||
eData.setN2SolverEdgeItr(n2Data.addSolverEdge(eItr));
|
||||
}
|
||||
|
||||
void setup() {
|
||||
if (h.solverRunSimplify()) {
|
||||
simplify();
|
||||
}
|
||||
|
||||
// Create node data objects.
|
||||
for (Graph::NodeItr nItr = g.nodesBegin(), nEnd = g.nodesEnd();
|
||||
nItr != nEnd; ++nItr) {
|
||||
nodeDataList.push_back(NodeData());
|
||||
g.setNodeData(nItr, &nodeDataList.back());
|
||||
}
|
||||
|
||||
// Create edge data objects.
|
||||
for (Graph::EdgeItr eItr = g.edgesBegin(), eEnd = g.edgesEnd();
|
||||
eItr != eEnd; ++eItr) {
|
||||
edgeDataList.push_back(EdgeData());
|
||||
g.setEdgeData(eItr, &edgeDataList.back());
|
||||
addSolverEdge(eItr);
|
||||
}
|
||||
}
|
||||
|
||||
void simplify() {
|
||||
disconnectTrivialNodes();
|
||||
eliminateIndependentEdges();
|
||||
}
|
||||
|
||||
// Eliminate trivial nodes.
|
||||
void disconnectTrivialNodes() {
|
||||
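// A node is "trivial" when its cost vector has length 1: it has only one
// possible selection, so each incident edge contributes a fixed row (or
// column) of costs that can be folded into the neighbouring node, after
// which the edge can be dropped.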
unsigned numDisconnected = 0;
|
||||
|
||||
for (Graph::NodeItr nItr = g.nodesBegin(), nEnd = g.nodesEnd();
|
||||
nItr != nEnd; ++nItr) {
|
||||
|
||||
if (g.getNodeCosts(nItr).getLength() == 1) {
|
||||
|
||||
std::vector<Graph::EdgeItr> edgesToRemove;
|
||||
|
||||
for (Graph::AdjEdgeItr aeItr = g.adjEdgesBegin(nItr),
|
||||
aeEnd = g.adjEdgesEnd(nItr);
|
||||
aeItr != aeEnd; ++aeItr) {
|
||||
|
||||
Graph::EdgeItr eItr = *aeItr;
|
||||
|
||||
if (g.getEdgeNode1(eItr) == nItr) {
|
||||
Graph::NodeItr otherNodeItr = g.getEdgeNode2(eItr);
|
||||
g.getNodeCosts(otherNodeItr) +=
|
||||
g.getEdgeCosts(eItr).getRowAsVector(0);
|
||||
}
|
||||
else {
|
||||
Graph::NodeItr otherNodeItr = g.getEdgeNode1(eItr);
|
||||
g.getNodeCosts(otherNodeItr) +=
|
||||
g.getEdgeCosts(eItr).getColAsVector(0);
|
||||
}
|
||||
|
||||
edgesToRemove.push_back(eItr);
|
||||
}
|
||||
|
||||
if (!edgesToRemove.empty())
|
||||
++numDisconnected;
|
||||
|
||||
while (!edgesToRemove.empty()) {
|
||||
g.removeEdge(edgesToRemove.back());
|
||||
edgesToRemove.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void eliminateIndependentEdges() {
|
||||
std::vector<Graph::EdgeItr> edgesToProcess;
|
||||
unsigned numEliminated = 0;
|
||||
|
||||
for (Graph::EdgeItr eItr = g.edgesBegin(), eEnd = g.edgesEnd();
|
||||
eItr != eEnd; ++eItr) {
|
||||
edgesToProcess.push_back(eItr);
|
||||
}
|
||||
|
||||
while (!edgesToProcess.empty()) {
|
||||
if (tryToEliminateEdge(edgesToProcess.back()))
|
||||
++numEliminated;
|
||||
edgesToProcess.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
bool tryToEliminateEdge(Graph::EdgeItr eItr) {
|
||||
if (tryNormaliseEdgeMatrix(eItr)) {
|
||||
g.removeEdge(eItr);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool tryNormaliseEdgeMatrix(Graph::EdgeItr &eItr) {
|
||||
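// Normalisation pushes each row minimum of the edge matrix into the first
// node's costs and each column minimum into the second node's costs. If the
// matrix becomes all zero the edge carries no remaining information and the
// caller may delete it.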
|
||||
const PBQPNum infinity = std::numeric_limits<PBQPNum>::infinity();
|
||||
|
||||
Matrix &edgeCosts = g.getEdgeCosts(eItr);
|
||||
Vector &uCosts = g.getNodeCosts(g.getEdgeNode1(eItr)),
|
||||
&vCosts = g.getNodeCosts(g.getEdgeNode2(eItr));
|
||||
|
||||
for (unsigned r = 0; r < edgeCosts.getRows(); ++r) {
|
||||
PBQPNum rowMin = infinity;
|
||||
|
||||
for (unsigned c = 0; c < edgeCosts.getCols(); ++c) {
|
||||
if (vCosts[c] != infinity && edgeCosts[r][c] < rowMin)
|
||||
rowMin = edgeCosts[r][c];
|
||||
}
|
||||
|
||||
uCosts[r] += rowMin;
|
||||
|
||||
if (rowMin != infinity) {
|
||||
edgeCosts.subFromRow(r, rowMin);
|
||||
}
|
||||
else {
|
||||
edgeCosts.setRow(r, 0);
|
||||
}
|
||||
}
|
||||
|
||||
for (unsigned c = 0; c < edgeCosts.getCols(); ++c) {
|
||||
PBQPNum colMin = infinity;
|
||||
|
||||
for (unsigned r = 0; r < edgeCosts.getRows(); ++r) {
|
||||
if (uCosts[r] != infinity && edgeCosts[r][c] < colMin)
|
||||
colMin = edgeCosts[r][c];
|
||||
}
|
||||
|
||||
vCosts[c] += colMin;
|
||||
|
||||
if (colMin != infinity) {
|
||||
edgeCosts.subFromCol(c, colMin);
|
||||
}
|
||||
else {
|
||||
edgeCosts.setCol(c, 0);
|
||||
}
|
||||
}
|
||||
|
||||
return edgeCosts.isZero();
|
||||
}
|
||||
|
||||
void backpropagate() {
|
||||
while (!stack.empty()) {
|
||||
computeSolution(stack.back());
|
||||
stack.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
void computeSolution(Graph::NodeItr nItr) {
|
||||
|
||||
NodeData &nodeData = getSolverNodeData(nItr);
|
||||
|
||||
Vector v(g.getNodeCosts(nItr));
|
||||
|
||||
// Solve based on existing solved edges.
|
||||
for (SolverEdgeItr solvedEdgeItr = nodeData.solverEdgesBegin(),
|
||||
solvedEdgeEnd = nodeData.solverEdgesEnd();
|
||||
solvedEdgeItr != solvedEdgeEnd; ++solvedEdgeItr) {
|
||||
|
||||
Graph::EdgeItr eItr(*solvedEdgeItr);
|
||||
Matrix &edgeCosts = g.getEdgeCosts(eItr);
|
||||
|
||||
if (nItr == g.getEdgeNode1(eItr)) {
|
||||
Graph::NodeItr adjNode(g.getEdgeNode2(eItr));
|
||||
unsigned adjSolution = s.getSelection(adjNode);
|
||||
v += edgeCosts.getColAsVector(adjSolution);
|
||||
}
|
||||
else {
|
||||
Graph::NodeItr adjNode(g.getEdgeNode1(eItr));
|
||||
unsigned adjSolution = s.getSelection(adjNode);
|
||||
v += edgeCosts.getRowAsVector(adjSolution);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
setSolution(nItr, v.minIndex());
|
||||
}
|
||||
|
||||
void cleanup() {
|
||||
h.cleanup();
|
||||
nodeDataList.clear();
|
||||
edgeDataList.clear();
|
||||
}
|
||||
};
|
||||
|
||||
/// \brief PBQP heuristic solver class.
|
||||
///
|
||||
/// Given a PBQP Graph g representing a PBQP problem, you can find a solution
|
||||
/// by calling
|
||||
/// <tt>Solution s = HeuristicSolver<H>::solve(g);</tt>
|
||||
///
|
||||
/// The choice of heuristic for the H parameter will affect both the solver
|
||||
/// speed and solution quality. The heuristic should be chosen based on the
|
||||
/// nature of the problem being solved.
|
||||
/// Currently the only heuristic included with LLVM is the Briggs heuristic for
|
||||
/// register allocation.
|
||||
template <typename HImpl>
|
||||
class HeuristicSolver {
|
||||
public:
|
||||
static Solution solve(Graph &g) {
|
||||
HeuristicSolverImpl<HImpl> hs(g);
|
||||
return hs.computeSolution();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif // LLVM_CODEGEN_PBQP_HEURISTICSOLVER_H
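A toy usage sketch for the solve() entry point above, assuming the Graph::addNode/addEdge signatures used by the PBQP register allocator of this LLVM version (addNode taking a cost Vector, addEdge taking two node iterators and a cost Matrix); it builds a two-node problem and solves it with the Briggs heuristic declared in Heuristics/Briggs.h below. This is illustration only, not part of the vendored header.

// Sketch only - not part of the vendored header.
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include "llvm/CodeGen/PBQP/HeuristicSolver.h"
#include "llvm/CodeGen/PBQP/Heuristics/Briggs.h"
#include <limits>

PBQP::Solution solveToyProblem() {
  using namespace PBQP;
  Graph g;
  // Option 0 is the spill slot, options 1-2 are registers (Briggs convention).
  Graph::NodeItr n1 = g.addNode(Vector(3, 0));
  Graph::NodeItr n2 = g.addNode(Vector(3, 0));
  // Forbid assigning both nodes the same register.
  Matrix interference(3, 3, 0);
  interference[1][1] = interference[2][2] =
      std::numeric_limits<PBQPNum>::infinity();
  g.addEdge(n1, n2, interference);
  return HeuristicSolver<Heuristics::Briggs>::solve(g);
}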
|
||||
468
thirdparty/clang/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h
vendored
Normal file
@@ -0,0 +1,468 @@
|
||||
//===-- Briggs.h --- Briggs Heuristic for PBQP ------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This class implements the Briggs test for "allocability" of nodes in a
|
||||
// PBQP graph representing a register allocation problem. Nodes which can be
|
||||
// proven allocable (by a safe and relatively accurate test) are removed from
|
||||
// the PBQP graph first. If no provably allocable node is present in the graph
|
||||
// then the node with the minimal spill-cost to degree ratio is removed.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
|
||||
#define LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
|
||||
|
||||
#include "../HeuristicBase.h"
|
||||
#include "../HeuristicSolver.h"
|
||||
#include <limits>
|
||||
|
||||
namespace PBQP {
|
||||
namespace Heuristics {
|
||||
|
||||
/// \brief PBQP Heuristic which applies an allocability test based on
|
||||
/// Briggs.
|
||||
///
|
||||
/// This heuristic assumes that the elements of cost vectors in the PBQP
|
||||
/// problem represent storage options, with the first being the spill
|
||||
/// option and subsequent elements representing legal registers for the
|
||||
/// corresponding node. Edge cost matrices are likewise assumed to represent
|
||||
/// register constraints.
|
||||
/// If one or more nodes can be proven allocable by this heuristic (by
|
||||
/// inspection of their constraint matrices) then the allocable node of
|
||||
/// highest degree is selected for the next reduction and pushed to the
|
||||
/// solver stack. If no nodes can be proven allocable then the node with
|
||||
/// the lowest estimated spill cost is selected and pushed to the solver stack
|
||||
/// instead.
|
||||
///
|
||||
/// This implementation is built on top of HeuristicBase.
|
||||
class Briggs : public HeuristicBase<Briggs> {
|
||||
private:
|
||||
|
||||
class LinkDegreeComparator {
|
||||
public:
|
||||
LinkDegreeComparator(HeuristicSolverImpl<Briggs> &s) : s(&s) {}
|
||||
bool operator()(Graph::NodeItr n1Itr, Graph::NodeItr n2Itr) const {
|
||||
if (s->getSolverDegree(n1Itr) > s->getSolverDegree(n2Itr))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
private:
|
||||
HeuristicSolverImpl<Briggs> *s;
|
||||
};
|
||||
|
||||
class SpillCostComparator {
|
||||
public:
|
||||
SpillCostComparator(HeuristicSolverImpl<Briggs> &s)
|
||||
: s(&s), g(&s.getGraph()) {}
|
||||
bool operator()(Graph::NodeItr n1Itr, Graph::NodeItr n2Itr) const {
|
||||
const PBQP::Vector &cv1 = g->getNodeCosts(n1Itr);
|
||||
const PBQP::Vector &cv2 = g->getNodeCosts(n2Itr);
|
||||
|
||||
PBQPNum cost1 = cv1[0] / s->getSolverDegree(n1Itr);
|
||||
PBQPNum cost2 = cv2[0] / s->getSolverDegree(n2Itr);
|
||||
|
||||
if (cost1 < cost2)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
HeuristicSolverImpl<Briggs> *s;
|
||||
Graph *g;
|
||||
};
|
||||
|
||||
typedef std::list<Graph::NodeItr> RNAllocableList;
|
||||
typedef RNAllocableList::iterator RNAllocableListItr;
|
||||
|
||||
typedef std::list<Graph::NodeItr> RNUnallocableList;
|
||||
typedef RNUnallocableList::iterator RNUnallocableListItr;
|
||||
|
||||
public:
|
||||
|
||||
struct NodeData {
|
||||
typedef std::vector<unsigned> UnsafeDegreesArray;
|
||||
bool isHeuristic, isAllocable, isInitialized;
|
||||
unsigned numDenied, numSafe;
|
||||
UnsafeDegreesArray unsafeDegrees;
|
||||
RNAllocableListItr rnaItr;
|
||||
RNUnallocableListItr rnuItr;
|
||||
|
||||
NodeData()
|
||||
: isHeuristic(false), isAllocable(false), isInitialized(false),
|
||||
numDenied(0), numSafe(0) { }
|
||||
};
|
||||
|
||||
struct EdgeData {
|
||||
typedef std::vector<unsigned> UnsafeArray;
|
||||
unsigned worst, reverseWorst;
|
||||
UnsafeArray unsafe, reverseUnsafe;
|
||||
bool isUpToDate;
|
||||
|
||||
EdgeData() : worst(0), reverseWorst(0), isUpToDate(false) {}
|
||||
};
|
||||
|
||||
/// \brief Construct an instance of the Briggs heuristic.
|
||||
/// @param solver A reference to the solver which is using this heuristic.
|
||||
Briggs(HeuristicSolverImpl<Briggs> &solver) :
|
||||
HeuristicBase<Briggs>(solver) {}
|
||||
|
||||
/// \brief Determine whether a node should be reduced using optimal
|
||||
/// reduction.
|
||||
/// @param nItr Node iterator to be considered.
|
||||
/// @return True if the given node should be optimally reduced, false
|
||||
/// otherwise.
|
||||
///
|
||||
/// Selects nodes of degree 0, 1 or 2 for optimal reduction, with one
|
||||
/// exception. Nodes whose spill cost (element 0 of their cost vector) is
|
||||
/// infinite are checked for allocability first. Allocable nodes may be
|
||||
/// optimally reduced, but nodes whose allocability cannot be proven are
|
||||
/// selected for heuristic reduction instead.
|
||||
bool shouldOptimallyReduce(Graph::NodeItr nItr) {
|
||||
if (getSolver().getSolverDegree(nItr) < 3) {
|
||||
return true;
|
||||
}
|
||||
// else
|
||||
return false;
|
||||
}
|
||||
|
||||
/// \brief Add a node to the heuristic reduce list.
|
||||
/// @param nItr Node iterator to add to the heuristic reduce list.
|
||||
void addToHeuristicReduceList(Graph::NodeItr nItr) {
|
||||
NodeData &nd = getHeuristicNodeData(nItr);
|
||||
initializeNode(nItr);
|
||||
nd.isHeuristic = true;
|
||||
if (nd.isAllocable) {
|
||||
nd.rnaItr = rnAllocableList.insert(rnAllocableList.end(), nItr);
|
||||
} else {
|
||||
nd.rnuItr = rnUnallocableList.insert(rnUnallocableList.end(), nItr);
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Heuristically reduce one of the nodes in the heuristic
|
||||
/// reduce list.
|
||||
/// @return True if a reduction takes place, false if the heuristic reduce
|
||||
/// list is empty.
|
||||
///
|
||||
/// If the list of allocable nodes is non-empty a node is selected
|
||||
/// from it and pushed to the stack. Otherwise if the non-allocable list
|
||||
/// is non-empty a node is selected from it and pushed to the stack.
|
||||
/// If both lists are empty the method simply returns false with no action
|
||||
/// taken.
|
||||
bool heuristicReduce() {
|
||||
if (!rnAllocableList.empty()) {
|
||||
RNAllocableListItr rnaItr =
|
||||
min_element(rnAllocableList.begin(), rnAllocableList.end(),
|
||||
LinkDegreeComparator(getSolver()));
|
||||
Graph::NodeItr nItr = *rnaItr;
|
||||
rnAllocableList.erase(rnaItr);
|
||||
handleRemoveNode(nItr);
|
||||
getSolver().pushToStack(nItr);
|
||||
return true;
|
||||
} else if (!rnUnallocableList.empty()) {
|
||||
RNUnallocableListItr rnuItr =
|
||||
min_element(rnUnallocableList.begin(), rnUnallocableList.end(),
|
||||
SpillCostComparator(getSolver()));
|
||||
Graph::NodeItr nItr = *rnuItr;
|
||||
rnUnallocableList.erase(rnuItr);
|
||||
handleRemoveNode(nItr);
|
||||
getSolver().pushToStack(nItr);
|
||||
return true;
|
||||
}
|
||||
// else
|
||||
return false;
|
||||
}
|
||||
|
||||
/// \brief Prepare a change in the costs on the given edge.
|
||||
/// @param eItr Edge iterator.
|
||||
void preUpdateEdgeCosts(Graph::EdgeItr eItr) {
|
||||
Graph &g = getGraph();
|
||||
Graph::NodeItr n1Itr = g.getEdgeNode1(eItr),
|
||||
n2Itr = g.getEdgeNode2(eItr);
|
||||
NodeData &n1 = getHeuristicNodeData(n1Itr),
|
||||
&n2 = getHeuristicNodeData(n2Itr);
|
||||
|
||||
if (n1.isHeuristic)
|
||||
subtractEdgeContributions(eItr, getGraph().getEdgeNode1(eItr));
|
||||
if (n2.isHeuristic)
|
||||
subtractEdgeContributions(eItr, getGraph().getEdgeNode2(eItr));
|
||||
|
||||
EdgeData &ed = getHeuristicEdgeData(eItr);
|
||||
ed.isUpToDate = false;
|
||||
}
|
||||
|
||||
/// \brief Handle the change in the costs on the given edge.
|
||||
/// @param eItr Edge iterator.
|
||||
void postUpdateEdgeCosts(Graph::EdgeItr eItr) {
|
||||
// This is effectively the same as adding a new edge now, since
|
||||
// we've factored out the costs of the old one.
|
||||
handleAddEdge(eItr);
|
||||
}
|
||||
|
||||
/// \brief Handle the addition of a new edge into the PBQP graph.
|
||||
/// @param eItr Edge iterator for the added edge.
|
||||
///
|
||||
/// Updates allocability of any nodes connected by this edge which are
|
||||
/// being managed by the heuristic. If allocability changes they are
|
||||
/// moved to the appropriate list.
|
||||
void handleAddEdge(Graph::EdgeItr eItr) {
|
||||
Graph &g = getGraph();
|
||||
Graph::NodeItr n1Itr = g.getEdgeNode1(eItr),
|
||||
n2Itr = g.getEdgeNode2(eItr);
|
||||
NodeData &n1 = getHeuristicNodeData(n1Itr),
|
||||
&n2 = getHeuristicNodeData(n2Itr);
|
||||
|
||||
// If neither node is managed by the heuristic there's nothing to be
|
||||
// done.
|
||||
if (!n1.isHeuristic && !n2.isHeuristic)
|
||||
return;
|
||||
|
||||
// Ok - we need to update at least one node.
|
||||
computeEdgeContributions(eItr);
|
||||
|
||||
// Update node 1 if it's managed by the heuristic.
|
||||
if (n1.isHeuristic) {
|
||||
bool n1WasAllocable = n1.isAllocable;
|
||||
addEdgeContributions(eItr, n1Itr);
|
||||
updateAllocability(n1Itr);
|
||||
if (n1WasAllocable && !n1.isAllocable) {
|
||||
rnAllocableList.erase(n1.rnaItr);
|
||||
n1.rnuItr =
|
||||
rnUnallocableList.insert(rnUnallocableList.end(), n1Itr);
|
||||
}
|
||||
}
|
||||
|
||||
// Likewise for node 2.
|
||||
if (n2.isHeuristic) {
|
||||
bool n2WasAllocable = n2.isAllocable;
|
||||
addEdgeContributions(eItr, n2Itr);
|
||||
updateAllocability(n2Itr);
|
||||
if (n2WasAllocable && !n2.isAllocable) {
|
||||
rnAllocableList.erase(n2.rnaItr);
|
||||
n2.rnuItr =
|
||||
rnUnallocableList.insert(rnUnallocableList.end(), n2Itr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Handle disconnection of an edge from a node.
|
||||
/// @param eItr Edge iterator for edge being disconnected.
|
||||
/// @param nItr Node iterator for the node being disconnected from.
|
||||
///
|
||||
/// Updates allocability of the given node and, if appropriate, moves the
|
||||
/// node to a new list.
|
||||
void handleRemoveEdge(Graph::EdgeItr eItr, Graph::NodeItr nItr) {
|
||||
NodeData &nd = getHeuristicNodeData(nItr);
|
||||
|
||||
// If the node is not managed by the heuristic there's nothing to be
|
||||
// done.
|
||||
if (!nd.isHeuristic)
|
||||
return;
|
||||
|
||||
EdgeData &ed = getHeuristicEdgeData(eItr);
|
||||
(void)ed;
|
||||
assert(ed.isUpToDate && "Edge data is not up to date.");
|
||||
|
||||
// Update node.
|
||||
bool ndWasAllocable = nd.isAllocable;
|
||||
subtractEdgeContributions(eItr, nItr);
|
||||
updateAllocability(nItr);
|
||||
|
||||
// If the node has gone optimal...
|
||||
if (shouldOptimallyReduce(nItr)) {
|
||||
nd.isHeuristic = false;
|
||||
addToOptimalReduceList(nItr);
|
||||
if (ndWasAllocable) {
|
||||
rnAllocableList.erase(nd.rnaItr);
|
||||
} else {
|
||||
rnUnallocableList.erase(nd.rnuItr);
|
||||
}
|
||||
} else {
|
||||
// Node didn't go optimal, but we might have to move it
|
||||
// from "unallocable" to "allocable".
|
||||
if (!ndWasAllocable && nd.isAllocable) {
|
||||
rnUnallocableList.erase(nd.rnuItr);
|
||||
nd.rnaItr = rnAllocableList.insert(rnAllocableList.end(), nItr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
NodeData& getHeuristicNodeData(Graph::NodeItr nItr) {
|
||||
return getSolver().getHeuristicNodeData(nItr);
|
||||
}
|
||||
|
||||
EdgeData& getHeuristicEdgeData(Graph::EdgeItr eItr) {
|
||||
return getSolver().getHeuristicEdgeData(eItr);
|
||||
}
|
||||
|
||||
// Work out what this edge will contribute to the allocability of the
|
||||
// nodes connected to it.
|
||||
void computeEdgeContributions(Graph::EdgeItr eItr) {
|
||||
EdgeData &ed = getHeuristicEdgeData(eItr);
|
||||
|
||||
if (ed.isUpToDate)
|
||||
return; // Edge data is already up to date.
|
||||
|
||||
Matrix &eCosts = getGraph().getEdgeCosts(eItr);
|
||||
|
||||
unsigned numRegs = eCosts.getRows() - 1,
|
||||
numReverseRegs = eCosts.getCols() - 1;
|
||||
|
||||
std::vector<unsigned> rowInfCounts(numRegs, 0),
|
||||
colInfCounts(numReverseRegs, 0);
|
||||
|
||||
ed.worst = 0;
|
||||
ed.reverseWorst = 0;
|
||||
ed.unsafe.clear();
|
||||
ed.unsafe.resize(numRegs, 0);
|
||||
ed.reverseUnsafe.clear();
|
||||
ed.reverseUnsafe.resize(numReverseRegs, 0);
|
||||
|
||||
for (unsigned i = 0; i < numRegs; ++i) {
|
||||
for (unsigned j = 0; j < numReverseRegs; ++j) {
|
||||
if (eCosts[i + 1][j + 1] ==
|
||||
std::numeric_limits<PBQPNum>::infinity()) {
|
||||
ed.unsafe[i] = 1;
|
||||
ed.reverseUnsafe[j] = 1;
|
||||
++rowInfCounts[i];
|
||||
++colInfCounts[j];
|
||||
|
||||
if (colInfCounts[j] > ed.worst) {
|
||||
ed.worst = colInfCounts[j];
|
||||
}
|
||||
|
||||
if (rowInfCounts[i] > ed.reverseWorst) {
|
||||
ed.reverseWorst = rowInfCounts[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ed.isUpToDate = true;
|
||||
}
|
||||
|
||||
// Add the contributions of the given edge to the given node's
|
||||
// numDenied and numSafe members. No action is taken other than to update
|
||||
// these member values. Once updated these numbers can be used by clients
|
||||
// to update the node's allocability.
|
||||
void addEdgeContributions(Graph::EdgeItr eItr, Graph::NodeItr nItr) {
|
||||
EdgeData &ed = getHeuristicEdgeData(eItr);
|
||||
|
||||
assert(ed.isUpToDate && "Using out-of-date edge numbers.");
|
||||
|
||||
NodeData &nd = getHeuristicNodeData(nItr);
|
||||
unsigned numRegs = getGraph().getNodeCosts(nItr).getLength() - 1;
|
||||
|
||||
bool nIsNode1 = nItr == getGraph().getEdgeNode1(eItr);
|
||||
EdgeData::UnsafeArray &unsafe =
|
||||
nIsNode1 ? ed.unsafe : ed.reverseUnsafe;
|
||||
nd.numDenied += nIsNode1 ? ed.worst : ed.reverseWorst;
|
||||
|
||||
for (unsigned r = 0; r < numRegs; ++r) {
|
||||
if (unsafe[r]) {
|
||||
if (nd.unsafeDegrees[r]==0) {
|
||||
--nd.numSafe;
|
||||
}
|
||||
++nd.unsafeDegrees[r];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Subtract the contributions of the given edge to the given node's
|
||||
// numDenied and numSafe members. No action is taken other than to update
|
||||
// these member values. Once updated these numbers can be used by clients
|
||||
// to update the node's allocability.
|
||||
void subtractEdgeContributions(Graph::EdgeItr eItr, Graph::NodeItr nItr) {
|
||||
EdgeData &ed = getHeuristicEdgeData(eItr);
|
||||
|
||||
assert(ed.isUpToDate && "Using out-of-date edge numbers.");
|
||||
|
||||
NodeData &nd = getHeuristicNodeData(nItr);
|
||||
unsigned numRegs = getGraph().getNodeCosts(nItr).getLength() - 1;
|
||||
|
||||
bool nIsNode1 = nItr == getGraph().getEdgeNode1(eItr);
|
||||
EdgeData::UnsafeArray &unsafe =
|
||||
nIsNode1 ? ed.unsafe : ed.reverseUnsafe;
|
||||
nd.numDenied -= nIsNode1 ? ed.worst : ed.reverseWorst;
|
||||
|
||||
for (unsigned r = 0; r < numRegs; ++r) {
|
||||
if (unsafe[r]) {
|
||||
if (nd.unsafeDegrees[r] == 1) {
|
||||
++nd.numSafe;
|
||||
}
|
||||
--nd.unsafeDegrees[r];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void updateAllocability(Graph::NodeItr nItr) {
|
||||
NodeData &nd = getHeuristicNodeData(nItr);
|
||||
unsigned numRegs = getGraph().getNodeCosts(nItr).getLength() - 1;
|
||||
nd.isAllocable = nd.numDenied < numRegs || nd.numSafe > 0;
|
||||
}
|
||||
|
||||
void initializeNode(Graph::NodeItr nItr) {
|
||||
NodeData &nd = getHeuristicNodeData(nItr);
|
||||
|
||||
if (nd.isInitialized)
|
||||
return; // Node data is already up to date.
|
||||
|
||||
unsigned numRegs = getGraph().getNodeCosts(nItr).getLength() - 1;
|
||||
|
||||
nd.numDenied = 0;
|
||||
const Vector& nCosts = getGraph().getNodeCosts(nItr);
|
||||
for (unsigned i = 1; i < nCosts.getLength(); ++i) {
|
||||
if (nCosts[i] == std::numeric_limits<PBQPNum>::infinity())
|
||||
++nd.numDenied;
|
||||
}
|
||||
|
||||
nd.numSafe = numRegs;
|
||||
nd.unsafeDegrees.resize(numRegs, 0);
|
||||
|
||||
typedef HeuristicSolverImpl<Briggs>::SolverEdgeItr SolverEdgeItr;
|
||||
|
||||
for (SolverEdgeItr aeItr = getSolver().solverEdgesBegin(nItr),
|
||||
aeEnd = getSolver().solverEdgesEnd(nItr);
|
||||
aeItr != aeEnd; ++aeItr) {
|
||||
|
||||
Graph::EdgeItr eItr = *aeItr;
|
||||
computeEdgeContributions(eItr);
|
||||
addEdgeContributions(eItr, nItr);
|
||||
}
|
||||
|
||||
updateAllocability(nItr);
|
||||
nd.isInitialized = true;
|
||||
}
|
||||
|
||||
void handleRemoveNode(Graph::NodeItr xnItr) {
|
||||
typedef HeuristicSolverImpl<Briggs>::SolverEdgeItr SolverEdgeItr;
|
||||
std::vector<Graph::EdgeItr> edgesToRemove;
|
||||
for (SolverEdgeItr aeItr = getSolver().solverEdgesBegin(xnItr),
|
||||
aeEnd = getSolver().solverEdgesEnd(xnItr);
|
||||
aeItr != aeEnd; ++aeItr) {
|
||||
Graph::NodeItr ynItr = getGraph().getEdgeOtherNode(*aeItr, xnItr);
|
||||
handleRemoveEdge(*aeItr, ynItr);
|
||||
edgesToRemove.push_back(*aeItr);
|
||||
}
|
||||
while (!edgesToRemove.empty()) {
|
||||
getSolver().removeSolverEdge(edgesToRemove.back());
|
||||
edgesToRemove.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
RNAllocableList rnAllocableList;
|
||||
RNUnallocableList rnUnallocableList;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif // LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
|
||||
288
thirdparty/clang/include/llvm/CodeGen/PBQP/Math.h
vendored
Normal file
@@ -0,0 +1,288 @@
|
||||
//===------ Math.h - PBQP Vector and Matrix classes -------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_PBQP_MATH_H
|
||||
#define LLVM_CODEGEN_PBQP_MATH_H
|
||||
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
#include <functional>
|
||||
|
||||
namespace PBQP {
|
||||
|
||||
typedef float PBQPNum;
|
||||
|
||||
/// \brief PBQP Vector class.
|
||||
class Vector {
|
||||
public:
|
||||
|
||||
/// \brief Construct a PBQP vector of the given size.
|
||||
explicit Vector(unsigned length) :
|
||||
length(length), data(new PBQPNum[length]) {
|
||||
}
|
||||
|
||||
/// \brief Construct a PBQP vector with initializer.
|
||||
Vector(unsigned length, PBQPNum initVal) :
|
||||
length(length), data(new PBQPNum[length]) {
|
||||
std::fill(data, data + length, initVal);
|
||||
}
|
||||
|
||||
/// \brief Copy construct a PBQP vector.
|
||||
Vector(const Vector &v) :
|
||||
length(v.length), data(new PBQPNum[length]) {
|
||||
std::copy(v.data, v.data + length, data);
|
||||
}
|
||||
|
||||
/// \brief Destroy this vector, return its memory.
|
||||
~Vector() { delete[] data; }
|
||||
|
||||
/// \brief Assignment operator.
|
||||
Vector& operator=(const Vector &v) {
|
||||
delete[] data;
|
||||
length = v.length;
|
||||
data = new PBQPNum[length];
|
||||
std::copy(v.data, v.data + length, data);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Return the length of the vector
|
||||
unsigned getLength() const {
|
||||
return length;
|
||||
}
|
||||
|
||||
/// \brief Element access.
|
||||
PBQPNum& operator[](unsigned index) {
|
||||
assert(index < length && "Vector element access out of bounds.");
|
||||
return data[index];
|
||||
}
|
||||
|
||||
/// \brief Const element access.
|
||||
const PBQPNum& operator[](unsigned index) const {
|
||||
assert(index < length && "Vector element access out of bounds.");
|
||||
return data[index];
|
||||
}
|
||||
|
||||
/// \brief Add another vector to this one.
|
||||
Vector& operator+=(const Vector &v) {
|
||||
assert(length == v.length && "Vector length mismatch.");
|
||||
std::transform(data, data + length, v.data, data, std::plus<PBQPNum>());
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Subtract another vector from this one.
|
||||
Vector& operator-=(const Vector &v) {
|
||||
assert(length == v.length && "Vector length mismatch.");
|
||||
std::transform(data, data + length, v.data, data, std::minus<PBQPNum>());
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Returns the index of the minimum value in this vector
|
||||
unsigned minIndex() const {
|
||||
return std::min_element(data, data + length) - data;
|
||||
}
|
||||
|
||||
private:
|
||||
unsigned length;
|
||||
PBQPNum *data;
|
||||
};
|
||||
|
||||
/// \brief Output a textual representation of the given vector on the given
|
||||
/// output stream.
|
||||
template <typename OStream>
|
||||
OStream& operator<<(OStream &os, const Vector &v) {
|
||||
assert((v.getLength() != 0) && "Zero-length vector badness.");
|
||||
|
||||
os << "[ " << v[0];
|
||||
for (unsigned i = 1; i < v.getLength(); ++i) {
|
||||
os << ", " << v[i];
|
||||
}
|
||||
os << " ]";
|
||||
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
/// \brief PBQP Matrix class
|
||||
class Matrix {
|
||||
public:
|
||||
|
||||
/// \brief Construct a PBQP Matrix with the given dimensions.
|
||||
Matrix(unsigned rows, unsigned cols) :
|
||||
rows(rows), cols(cols), data(new PBQPNum[rows * cols]) {
|
||||
}
|
||||
|
||||
/// \brief Construct a PBQP Matrix with the given dimensions and initial
|
||||
/// value.
|
||||
Matrix(unsigned rows, unsigned cols, PBQPNum initVal) :
|
||||
rows(rows), cols(cols), data(new PBQPNum[rows * cols]) {
|
||||
std::fill(data, data + (rows * cols), initVal);
|
||||
}
|
||||
|
||||
/// \brief Copy construct a PBQP matrix.
|
||||
Matrix(const Matrix &m) :
|
||||
rows(m.rows), cols(m.cols), data(new PBQPNum[rows * cols]) {
|
||||
std::copy(m.data, m.data + (rows * cols), data);
|
||||
}
|
||||
|
||||
/// \brief Destroy this matrix, return its memory.
|
||||
~Matrix() { delete[] data; }
|
||||
|
||||
/// \brief Assignment operator.
|
||||
Matrix& operator=(const Matrix &m) {
|
||||
delete[] data;
|
||||
rows = m.rows; cols = m.cols;
|
||||
data = new PBQPNum[rows * cols];
|
||||
std::copy(m.data, m.data + (rows * cols), data);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Return the number of rows in this matrix.
|
||||
unsigned getRows() const { return rows; }
|
||||
|
||||
/// \brief Return the number of cols in this matrix.
|
||||
unsigned getCols() const { return cols; }
|
||||
|
||||
/// \brief Matrix element access.
|
||||
PBQPNum* operator[](unsigned r) {
|
||||
assert(r < rows && "Row out of bounds.");
|
||||
return data + (r * cols);
|
||||
}
|
||||
|
||||
/// \brief Matrix element access.
|
||||
const PBQPNum* operator[](unsigned r) const {
|
||||
assert(r < rows && "Row out of bounds.");
|
||||
return data + (r * cols);
|
||||
}
|
||||
|
||||
/// \brief Returns the given row as a vector.
|
||||
Vector getRowAsVector(unsigned r) const {
|
||||
Vector v(cols);
|
||||
for (unsigned c = 0; c < cols; ++c)
|
||||
v[c] = (*this)[r][c];
|
||||
return v;
|
||||
}
|
||||
|
||||
/// \brief Returns the given column as a vector.
|
||||
Vector getColAsVector(unsigned c) const {
|
||||
Vector v(rows);
|
||||
for (unsigned r = 0; r < rows; ++r)
|
||||
v[r] = (*this)[r][c];
|
||||
return v;
|
||||
}
|
||||
|
||||
/// \brief Reset the matrix to the given value.
|
||||
Matrix& reset(PBQPNum val = 0) {
|
||||
std::fill(data, data + (rows * cols), val);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Set a single row of this matrix to the given value.
|
||||
Matrix& setRow(unsigned r, PBQPNum val) {
|
||||
assert(r < rows && "Row out of bounds.");
|
||||
std::fill(data + (r * cols), data + ((r + 1) * cols), val);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Set a single column of this matrix to the given value.
|
||||
Matrix& setCol(unsigned c, PBQPNum val) {
|
||||
assert(c < cols && "Column out of bounds.");
|
||||
for (unsigned r = 0; r < rows; ++r)
|
||||
(*this)[r][c] = val;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Matrix transpose.
|
||||
Matrix transpose() const {
|
||||
Matrix m(cols, rows);
|
||||
for (unsigned r = 0; r < rows; ++r)
|
||||
for (unsigned c = 0; c < cols; ++c)
|
||||
m[c][r] = (*this)[r][c];
|
||||
return m;
|
||||
}
|
||||
|
||||
/// \brief Returns the diagonal of the matrix as a vector.
|
||||
///
|
||||
/// Matrix must be square.
|
||||
Vector diagonalize() const {
|
||||
assert(rows == cols && "Attempt to diagonalize non-square matrix.");
|
||||
|
||||
Vector v(rows);
|
||||
for (unsigned r = 0; r < rows; ++r)
|
||||
v[r] = (*this)[r][r];
|
||||
return v;
|
||||
}
|
||||
|
||||
/// \brief Add the given matrix to this one.
|
||||
Matrix& operator+=(const Matrix &m) {
|
||||
assert(rows == m.rows && cols == m.cols &&
|
||||
"Matrix dimensions mismatch.");
|
||||
std::transform(data, data + (rows * cols), m.data, data,
|
||||
std::plus<PBQPNum>());
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Returns the minimum of the given row
|
||||
PBQPNum getRowMin(unsigned r) const {
|
||||
assert(r < rows && "Row out of bounds");
|
||||
return *std::min_element(data + (r * cols), data + ((r + 1) * cols));
|
||||
}
|
||||
|
||||
/// \brief Returns the minimum of the given column
|
||||
PBQPNum getColMin(unsigned c) const {
|
||||
PBQPNum minElem = (*this)[0][c];
|
||||
for (unsigned r = 1; r < rows; ++r)
|
||||
if ((*this)[r][c] < minElem) minElem = (*this)[r][c];
|
||||
return minElem;
|
||||
}
|
||||
|
||||
/// \brief Subtracts the given scalar from the elements of the given row.
|
||||
Matrix& subFromRow(unsigned r, PBQPNum val) {
|
||||
assert(r < rows && "Row out of bounds");
|
||||
std::transform(data + (r * cols), data + ((r + 1) * cols),
|
||||
data + (r * cols),
|
||||
std::bind2nd(std::minus<PBQPNum>(), val));
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Subtracts the given scalar from the elements of the given column.
|
||||
Matrix& subFromCol(unsigned c, PBQPNum val) {
|
||||
for (unsigned r = 0; r < rows; ++r)
|
||||
(*this)[r][c] -= val;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// \brief Returns true if this is a zero matrix.
|
||||
bool isZero() const {
|
||||
return find_if(data, data + (rows * cols),
|
||||
std::bind2nd(std::not_equal_to<PBQPNum>(), 0)) ==
|
||||
data + (rows * cols);
|
||||
}
|
||||
|
||||
private:
|
||||
unsigned rows, cols;
|
||||
PBQPNum *data;
|
||||
};
|
||||
|
||||
/// \brief Output a textual representation of the given matrix on the given
|
||||
/// output stream.
|
||||
template <typename OStream>
|
||||
OStream& operator<<(OStream &os, const Matrix &m) {
|
||||
|
||||
assert((m.getRows() != 0) && "Zero-row matrix badness.");
|
||||
|
||||
for (unsigned i = 0; i < m.getRows(); ++i) {
|
||||
os << m.getRowAsVector(i);
|
||||
}
|
||||
|
||||
return os;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif // LLVM_CODEGEN_PBQP_MATH_H
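A quick illustration of the Vector/Matrix API above: the sketch below mirrors the row-normalisation step of tryNormaliseEdgeMatrix in HeuristicSolver.h, folding row minima into a node-cost vector and subtracting them from the edge matrix (a full reduction would also apply the analogous column pass). Everything used here is declared in this header; the snippet is illustration only.

// Sketch only - not part of the vendored header.
#include "llvm/CodeGen/PBQP/Math.h"
#include <iostream>

int main() {
  PBQP::Vector u(2, 0);     // node costs, initialised to 0
  PBQP::Matrix m(2, 2, 3);  // edge costs, all elements 3
  m[0][1] = 5;              // make row 0 non-uniform

  for (unsigned r = 0; r < m.getRows(); ++r) {
    PBQP::PBQPNum rowMin = m.getRowMin(r);
    u[r] += rowMin;         // pull the row minimum into the node vector
    m.subFromRow(r, rowMin);
  }

  std::cout << "node costs: " << u << "\n"
            << "edge matrix reduced to zero: "
            << (m.isZero() ? "yes" : "no") << "\n";
  return 0;
}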
|
||||
93
thirdparty/clang/include/llvm/CodeGen/PBQP/Solution.h
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
//===-- Solution.h ------- PBQP Solution ------------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// PBQP Solution class.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_PBQP_SOLUTION_H
|
||||
#define LLVM_CODEGEN_PBQP_SOLUTION_H
|
||||
|
||||
#include "Graph.h"
|
||||
#include "Math.h"
|
||||
#include <map>
|
||||
|
||||
namespace PBQP {
|
||||
|
||||
/// \brief Represents a solution to a PBQP problem.
|
||||
///
|
||||
/// To get the selection for each node in the problem use the getSelection method.
|
||||
class Solution {
|
||||
private:
|
||||
|
||||
typedef std::map<Graph::ConstNodeItr, unsigned,
|
||||
NodeItrComparator> SelectionsMap;
|
||||
SelectionsMap selections;
|
||||
|
||||
unsigned r0Reductions, r1Reductions, r2Reductions, rNReductions;
|
||||
|
||||
public:
|
||||
|
||||
/// \brief Initialise an empty solution.
|
||||
Solution()
|
||||
: r0Reductions(0), r1Reductions(0), r2Reductions(0), rNReductions(0) {}
|
||||
|
||||
/// \brief Number of nodes for which selections have been made.
|
||||
/// @return Number of nodes for which selections have been made.
|
||||
unsigned numNodes() const { return selections.size(); }
|
||||
|
||||
/// \brief Records a reduction via the R0 rule. Should be called from the
|
||||
/// solver only.
|
||||
void recordR0() { ++r0Reductions; }
|
||||
|
||||
/// \brief Returns the number of R0 reductions applied to solve the problem.
|
||||
unsigned numR0Reductions() const { return r0Reductions; }
|
||||
|
||||
/// \brief Records a reduction via the R1 rule. Should be called from the
|
||||
/// solver only.
|
||||
void recordR1() { ++r1Reductions; }
|
||||
|
||||
/// \brief Returns the number of R1 reductions applied to solve the problem.
|
||||
unsigned numR1Reductions() const { return r1Reductions; }
|
||||
|
||||
/// \brief Records a reduction via the R2 rule. Should be called from the
|
||||
/// solver only.
|
||||
void recordR2() { ++r2Reductions; }
|
||||
|
||||
/// \brief Returns the number of R2 reductions applied to solve the problem.
|
||||
unsigned numR2Reductions() const { return r2Reductions; }
|
||||
|
||||
/// \brief Records a reduction via the RN rule. Should be called from the
|
||||
/// solver only.
|
||||
void recordRN() { ++rNReductions; }
|
||||
|
||||
/// \brief Returns the number of RN reductions applied to solve the problem.
|
||||
unsigned numRNReductions() const { return rNReductions; }
|
||||
|
||||
/// \brief Set the selection for a given node.
|
||||
/// @param nItr Node iterator.
|
||||
/// @param selection Selection for nItr.
|
||||
void setSelection(Graph::NodeItr nItr, unsigned selection) {
|
||||
selections[nItr] = selection;
|
||||
}
|
||||
|
||||
/// \brief Get a node's selection.
|
||||
/// @param nItr Node iterator.
|
||||
/// @return The selection for nItr;
|
||||
unsigned getSelection(Graph::ConstNodeItr nItr) const {
|
||||
SelectionsMap::const_iterator sItr = selections.find(nItr);
|
||||
assert(sItr != selections.end() && "No selection for node.");
|
||||
return sItr->second;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif // LLVM_CODEGEN_PBQP_SOLUTION_H
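A small sketch of how a client reads a Solution back, assuming a PBQP::Graph g that has already been solved (for example via HeuristicSolver, see above). It relies only on members declared in this header plus the Graph node iteration already used earlier in this commit; the helper itself is illustrative.

// Sketch only - not part of the vendored header.
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include <iostream>

void dumpSolution(PBQP::Graph &g, const PBQP::Solution &s) {
  for (PBQP::Graph::NodeItr nItr = g.nodesBegin(), nEnd = g.nodesEnd();
       nItr != nEnd; ++nItr) {
    // Option 0 conventionally means "spill" in the register allocator.
    std::cout << "node selects option " << s.getSelection(nItr) << "\n";
  }
  std::cout << "reductions R0/R1/R2/RN: "
            << s.numR0Reductions() << "/" << s.numR1Reductions() << "/"
            << s.numR2Reductions() << "/" << s.numRNReductions() << "\n";
}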
|
||||
563
thirdparty/clang/include/llvm/CodeGen/Passes.h
vendored
Normal file
@@ -0,0 +1,563 @@
|
||||
//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines interfaces to access the target independent code generation
|
||||
// passes provided by the LLVM backend.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_PASSES_H
|
||||
#define LLVM_CODEGEN_PASSES_H
|
||||
|
||||
#include "llvm/Pass.h"
|
||||
#include "llvm/Target/TargetMachine.h"
|
||||
#include <string>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class FunctionPass;
|
||||
class MachineFunctionPass;
|
||||
class PassInfo;
|
||||
class PassManagerBase;
|
||||
class TargetLoweringBase;
|
||||
class TargetLowering;
|
||||
class TargetRegisterClass;
|
||||
class raw_ostream;
|
||||
}
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class PassConfigImpl;
|
||||
|
||||
/// Discriminated union of Pass ID types.
|
||||
///
|
||||
/// The PassConfig API prefers dealing with IDs because they are safer and more
|
||||
/// efficient. IDs decouple configuration from instantiation. This way, when a
|
||||
/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe to
|
||||
/// refer to a Pass pointer after adding it to a pass manager, which deletes
|
||||
/// redundant pass instances.
|
||||
///
|
||||
/// However, it is convenient to directly instantiate target passes with
|
||||
/// non-default ctors. These often don't have a registered PassInfo. Rather than
|
||||
/// force all target passes to implement the pass registry boilerplate, allow
|
||||
/// the PassConfig API to handle either type.
|
||||
///
|
||||
/// AnalysisID is sadly char*, so PointerIntPair won't work.
|
||||
class IdentifyingPassPtr {
|
||||
union {
|
||||
AnalysisID ID;
|
||||
Pass *P;
|
||||
};
|
||||
bool IsInstance;
|
||||
public:
|
||||
IdentifyingPassPtr() : P(0), IsInstance(false) {}
|
||||
IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr), IsInstance(false) {}
|
||||
IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
|
||||
|
||||
bool isValid() const { return P; }
|
||||
bool isInstance() const { return IsInstance; }
|
||||
|
||||
AnalysisID getID() const {
|
||||
assert(!IsInstance && "Not a Pass ID");
|
||||
return ID;
|
||||
}
|
||||
Pass *getInstance() const {
|
||||
assert(IsInstance && "Not a Pass Instance");
|
||||
return P;
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct isPodLike<IdentifyingPassPtr> {
|
||||
static const bool value = true;
|
||||
};
|
||||
|
||||
/// Target-Independent Code Generator Pass Configuration Options.
|
||||
///
|
||||
/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
|
||||
/// to the internals of other CodeGen passes.
|
||||
class TargetPassConfig : public ImmutablePass {
|
||||
public:
|
||||
/// Pseudo Pass IDs. These are defined within TargetPassConfig because they
|
||||
/// are unregistered pass IDs. They are only useful for use with
|
||||
/// TargetPassConfig APIs to identify multiple occurrences of the same pass.
|
||||
///
|
||||
|
||||
/// EarlyTailDuplicate - A clone of the TailDuplicate pass that runs early
|
||||
/// during codegen, on SSA form.
|
||||
static char EarlyTailDuplicateID;
|
||||
|
||||
/// PostRAMachineLICM - A clone of the LICM pass that runs during late machine
|
||||
/// optimization after regalloc.
|
||||
static char PostRAMachineLICMID;
|
||||
|
||||
private:
|
||||
PassManagerBase *PM;
|
||||
AnalysisID StartAfter;
|
||||
AnalysisID StopAfter;
|
||||
bool Started;
|
||||
bool Stopped;
|
||||
|
||||
protected:
|
||||
TargetMachine *TM;
|
||||
PassConfigImpl *Impl; // Internal data structures
|
||||
bool Initialized; // Flagged after all passes are configured.
|
||||
|
||||
// Target Pass Options
|
||||
// Targets provide a default setting, user flags override.
|
||||
//
|
||||
bool DisableVerify;
|
||||
|
||||
/// Default setting for -enable-tail-merge on this target.
|
||||
bool EnableTailMerge;
|
||||
|
||||
public:
|
||||
TargetPassConfig(TargetMachine *tm, PassManagerBase &pm);
|
||||
// Dummy constructor.
|
||||
TargetPassConfig();
|
||||
|
||||
virtual ~TargetPassConfig();
|
||||
|
||||
static char ID;
|
||||
|
||||
/// Get the right type of TargetMachine for this target.
|
||||
template<typename TMC> TMC &getTM() const {
|
||||
return *static_cast<TMC*>(TM);
|
||||
}
|
||||
|
||||
const TargetLowering *getTargetLowering() const {
|
||||
return TM->getTargetLowering();
|
||||
}
|
||||
|
||||
//
|
||||
void setInitialized() { Initialized = true; }
|
||||
|
||||
CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); }
|
||||
|
||||
/// setStartStopPasses - Set the StartAfter and StopAfter passes to allow
|
||||
/// running only a portion of the normal code-gen pass sequence. If the
|
||||
/// Start pass ID is zero, then compilation will begin at the normal point;
|
||||
/// otherwise, clear the Started flag to indicate that passes should not be
|
||||
/// added until the starting pass is seen. If the Stop pass ID is zero,
|
||||
/// then compilation will continue to the end.
|
||||
void setStartStopPasses(AnalysisID Start, AnalysisID Stop) {
|
||||
StartAfter = Start;
|
||||
StopAfter = Stop;
|
||||
Started = (StartAfter == 0);
|
||||
}
|
||||
|
||||
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
|
||||
|
||||
bool getEnableTailMerge() const { return EnableTailMerge; }
|
||||
void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
|
||||
|
||||
/// Allow the target to override a specific pass without overriding the pass
|
||||
/// pipeline. When passes are added to the standard pipeline at the
|
||||
/// point where StandardID is expected, add TargetID in its place.
|
||||
void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);
|
||||
|
||||
/// Insert InsertedPassID pass after TargetPassID pass.
|
||||
void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID);
|
||||
|
||||
/// Allow the target to enable a specific standard pass by default.
|
||||
void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
|
||||
|
||||
/// Allow the target to disable a specific standard pass by default.
|
||||
void disablePass(AnalysisID PassID) {
|
||||
substitutePass(PassID, IdentifyingPassPtr());
|
||||
}
|
||||
|
||||
/// Return the pass substituted for StandardID by the target.
|
||||
/// If no substitution exists, return StandardID.
|
||||
IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;
|
||||
|
||||
/// Return true if the optimized regalloc pipeline is enabled.
|
||||
bool getOptimizeRegAlloc() const;
|
||||
|
||||
/// Add common target configurable passes that perform LLVM IR to IR
|
||||
/// transforms following machine independent optimization.
|
||||
virtual void addIRPasses();
|
||||
|
||||
/// Add passes to lower exception handling for the code generator.
|
||||
void addPassesToHandleExceptions();
|
||||
|
||||
/// Add pass to prepare the LLVM IR for code generation. This should be done
|
||||
/// before exception handling preparation passes.
|
||||
virtual void addCodeGenPrepare();
|
||||
|
||||
/// Add common passes that perform LLVM IR to IR transforms in preparation for
|
||||
/// instruction selection.
|
||||
virtual void addISelPrepare();
|
||||
|
||||
/// addInstSelector - This method should install an instruction selector pass,
|
||||
/// which converts from LLVM code to machine instructions.
|
||||
virtual bool addInstSelector() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Add the complete, standard set of LLVM CodeGen passes.
|
||||
/// Fully developed targets will not generally override this.
|
||||
virtual void addMachinePasses();
|
||||
|
||||
protected:
|
||||
// Helper to verify the analysis is really immutable.
|
||||
void setOpt(bool &Opt, bool Val);
|
||||
|
||||
/// Methods with trivial inline returns are convenient points in the common
|
||||
/// codegen pass pipeline where targets may insert passes. Methods with
|
||||
/// out-of-line standard implementations are major CodeGen stages called by
|
||||
/// addMachinePasses. Some targets may override major stages when inserting
|
||||
/// passes is insufficient, but maintaining overridden stages is more work.
|
||||
///
|
||||
|
||||
/// addPreISel - This method should add any "last minute" LLVM->LLVM
|
||||
/// passes (which are run just before instruction selector).
|
||||
virtual bool addPreISel() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// addMachineSSAOptimization - Add standard passes that optimize machine
|
||||
/// instructions in SSA form.
|
||||
virtual void addMachineSSAOptimization();
|
||||
|
||||
/// Add passes that optimize instruction level parallelism for out-of-order
|
||||
/// targets. These passes are run while the machine code is still in SSA
|
||||
/// form, so they can use MachineTraceMetrics to control their heuristics.
|
||||
///
|
||||
/// All passes added here should preserve the MachineDominatorTree,
|
||||
/// MachineLoopInfo, and MachineTraceMetrics analyses.
|
||||
virtual bool addILPOpts() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// addPreRegAlloc - This method may be implemented by targets that want to
|
||||
/// run passes immediately before register allocation. This should return
|
||||
/// true if -print-machineinstrs should print after these passes.
|
||||
virtual bool addPreRegAlloc() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// createTargetRegisterAllocator - Create the register allocator pass for
|
||||
/// this target at the current optimization level.
|
||||
virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);
|
||||
|
||||
/// addFastRegAlloc - Add the minimum set of target-independent passes that
|
||||
/// are required for fast register allocation.
|
||||
virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
|
||||
|
||||
/// addOptimizedRegAlloc - Add passes related to register allocation.
|
||||
/// LLVMTargetMachine provides standard regalloc passes for most targets.
|
||||
virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
|
||||
|
||||
/// addPreRewrite - Add passes to the optimized register allocation pipeline
|
||||
/// after register allocation is complete, but before virtual registers are
|
||||
/// rewritten to physical registers.
|
||||
///
|
||||
/// These passes must preserve VirtRegMap and LiveIntervals, and when running
|
||||
/// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
|
||||
/// When these passes run, VirtRegMap contains legal physreg assignments for
|
||||
/// all virtual registers.
|
||||
virtual bool addPreRewrite() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// addPostRegAlloc - This method may be implemented by targets that want to
|
||||
/// run passes after register allocation pass pipeline but before
|
||||
/// prolog-epilog insertion. This should return true if -print-machineinstrs
|
||||
/// should print after these passes.
|
||||
virtual bool addPostRegAlloc() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Add passes that optimize machine instructions after register allocation.
|
||||
virtual void addMachineLateOptimization();
|
||||
|
||||
/// addPreSched2 - This method may be implemented by targets that want to
|
||||
/// run passes after prolog-epilog insertion and before the second instruction
|
||||
/// scheduling pass. This should return true if -print-machineinstrs should
|
||||
/// print after these passes.
|
||||
virtual bool addPreSched2() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// addGCPasses - Add late codegen passes that analyze code for garbage
|
||||
/// collection. This should return true if GC info should be printed after
|
||||
/// these passes.
|
||||
virtual bool addGCPasses();
|
||||
|
||||
/// Add standard basic block placement passes.
|
||||
virtual void addBlockPlacement();
|
||||
|
||||
/// addPreEmitPass - This pass may be implemented by targets that want to run
|
||||
/// passes immediately before machine code is emitted. This should return
|
||||
/// true if -print-machineinstrs should print out the code after the passes.
|
||||
virtual bool addPreEmitPass() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Utilities for targets to add passes to the pass manager.
|
||||
///
|
||||
|
||||
/// Add a CodeGen pass at this point in the pipeline after checking overrides.
|
||||
/// Return the pass that was added, or zero if no pass was added.
|
||||
AnalysisID addPass(AnalysisID PassID);
|
||||
|
||||
/// Add a pass to the PassManager if that pass is supposed to be run, as
|
||||
/// determined by the StartAfter and StopAfter options.
|
||||
void addPass(Pass *P);
|
||||
|
||||
/// addMachinePasses helper to create the target-selected or overridden
|
||||
/// regalloc pass.
|
||||
FunctionPass *createRegAllocPass(bool Optimized);
|
||||
|
||||
/// printAndVerify - Add a pass to dump then verify the machine function, if
|
||||
/// those steps are enabled.
|
||||
///
|
||||
void printAndVerify(const char *Banner);
|
||||
};
|
||||
} // namespace llvm
|
||||
|
||||
/// List of target independent CodeGen pass IDs.
|
||||
namespace llvm {
|
||||
/// \brief Create a basic TargetTransformInfo analysis pass.
|
||||
///
|
||||
/// This pass implements the target transform info analysis using the target
|
||||
/// independent information available to the LLVM code generator.
|
||||
ImmutablePass *
|
||||
createBasicTargetTransformInfoPass(const TargetLoweringBase *TLI);
|
||||
|
||||
/// createUnreachableBlockEliminationPass - The LLVM code generator does not
|
||||
/// work well with unreachable basic blocks (what live ranges make sense for a
|
||||
/// block that cannot be reached?). As such, a code generator should either
|
||||
/// not instruction select unreachable blocks, or run this pass as its
|
||||
/// last LLVM modifying pass to clean up blocks that are not reachable from
|
||||
/// the entry block.
|
||||
FunctionPass *createUnreachableBlockEliminationPass();
|
||||
|
||||
/// MachineFunctionPrinter pass - This pass prints out the machine function to
|
||||
/// the given stream as a debugging tool.
|
||||
MachineFunctionPass *
|
||||
createMachineFunctionPrinterPass(raw_ostream &OS,
|
||||
const std::string &Banner ="");
|
||||
|
||||
/// MachineLoopInfo - This pass is a loop analysis pass.
|
||||
extern char &MachineLoopInfoID;
|
||||
|
||||
/// MachineDominators - This pass is a machine dominators analysis pass.
|
||||
extern char &MachineDominatorsID;
|
||||
|
||||
/// EdgeBundles analysis - Bundle machine CFG edges.
|
||||
extern char &EdgeBundlesID;
|
||||
|
||||
/// LiveVariables pass - This pass computes the set of blocks in which each
|
||||
/// variable is live and sets machine operand kill flags.
|
||||
extern char &LiveVariablesID;
|
||||
|
||||
/// PHIElimination - This pass eliminates machine instruction PHI nodes
|
||||
/// by inserting copy instructions. This destroys SSA information, but is the
|
||||
/// desired input for some register allocators. This pass is "required" by
|
||||
/// these register allocators like this: AU.addRequiredID(PHIEliminationID);
|
||||
extern char &PHIEliminationID;
|
||||
|
||||
/// StrongPHIElimination - This pass eliminates machine instruction PHI
|
||||
/// nodes by inserting copy instructions. This destroys SSA information, but
|
||||
/// is the desired input for some register allocators. This pass is
|
||||
/// "required" by these register allocator like this:
|
||||
/// AU.addRequiredID(PHIEliminationID);
|
||||
/// This pass is still in development
|
||||
extern char &StrongPHIEliminationID;
|
||||
|
||||
/// LiveIntervals - This analysis keeps track of the live ranges of virtual
|
||||
/// and physical registers.
|
||||
extern char &LiveIntervalsID;
|
||||
|
||||
/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
|
||||
extern char &LiveStacksID;
|
||||
|
||||
/// TwoAddressInstruction - This pass reduces two-address instructions to
|
||||
/// use two operands. This destroys SSA information but it is desired by
|
||||
/// register allocators.
|
||||
extern char &TwoAddressInstructionPassID;
|
||||
|
||||
/// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
|
||||
extern char &ProcessImplicitDefsID;
|
||||
|
||||
/// RegisterCoalescer - This pass merges live ranges to eliminate copies.
|
||||
extern char &RegisterCoalescerID;
|
||||
|
||||
/// MachineScheduler - This pass schedules machine instructions.
|
||||
extern char &MachineSchedulerID;
|
||||
|
||||
/// SpillPlacement analysis. Suggest optimal placement of spill code between
|
||||
/// basic blocks.
|
||||
extern char &SpillPlacementID;
|
||||
|
||||
/// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
|
||||
/// assigned in VirtRegMap.
|
||||
extern char &VirtRegRewriterID;
|
||||
|
||||
/// UnreachableMachineBlockElimination - This pass removes unreachable
|
||||
/// machine basic blocks.
|
||||
extern char &UnreachableMachineBlockElimID;
|
||||
|
||||
/// DeadMachineInstructionElim - This pass removes dead machine instructions.
|
||||
extern char &DeadMachineInstructionElimID;
|
||||
|
||||
/// FastRegisterAllocation Pass - This pass register allocates as fast as
|
||||
/// possible. It is best suited for debug code where live ranges are short.
|
||||
///
|
||||
FunctionPass *createFastRegisterAllocator();
|
||||
|
||||
/// BasicRegisterAllocation Pass - This pass implements a degenerate global
|
||||
/// register allocator using the basic regalloc framework.
|
||||
///
|
||||
FunctionPass *createBasicRegisterAllocator();
|
||||
|
||||
/// Greedy register allocation pass - This pass implements a global register
|
||||
/// allocator for optimized builds.
|
||||
///
|
||||
FunctionPass *createGreedyRegisterAllocator();
|
||||
|
||||
/// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
|
||||
/// Quadratic Programming (PBQP) based register allocator.
|
||||
///
|
||||
FunctionPass *createDefaultPBQPRegisterAllocator();
|
||||
|
||||
/// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
|
||||
/// and eliminates abstract frame references.
|
||||
extern char &PrologEpilogCodeInserterID;
|
||||
|
||||
/// ExpandPostRAPseudos - This pass expands pseudo instructions after
|
||||
/// register allocation.
|
||||
extern char &ExpandPostRAPseudosID;
|
||||
|
||||
/// createPostRAScheduler - This pass performs post register allocation
|
||||
/// scheduling.
|
||||
extern char &PostRASchedulerID;
|
||||
|
||||
/// BranchFolding - This pass performs machine code CFG based
|
||||
/// optimizations to delete branches to branches, eliminate branches to
|
||||
/// successor blocks (creating fall throughs), and eliminating branches over
|
||||
/// branches.
|
||||
extern char &BranchFolderPassID;
|
||||
|
||||
/// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
|
||||
extern char &MachineFunctionPrinterPassID;
|
||||
|
||||
/// TailDuplicate - Duplicate blocks with unconditional branches
|
||||
/// into tails of their predecessors.
|
||||
extern char &TailDuplicateID;
|
||||
|
||||
/// MachineTraceMetrics - This pass computes critical path and CPU resource
|
||||
/// usage in an ensemble of traces.
|
||||
extern char &MachineTraceMetricsID;
|
||||
|
||||
/// EarlyIfConverter - This pass performs if-conversion on SSA form by
|
||||
/// inserting cmov instructions.
|
||||
extern char &EarlyIfConverterID;
|
||||
|
||||
/// StackSlotColoring - This pass performs stack coloring and merging.
|
||||
/// It merges disjoint allocas to reduce the stack size.
|
||||
extern char &StackColoringID;
|
||||
|
||||
/// IfConverter - This pass performs machine code if conversion.
|
||||
extern char &IfConverterID;
|
||||
|
||||
/// MachineBlockPlacement - This pass places basic blocks based on branch
|
||||
/// probabilities.
|
||||
extern char &MachineBlockPlacementID;
|
||||
|
||||
/// MachineBlockPlacementStats - This pass collects statistics about the
|
||||
/// basic block placement using branch probabilities and block frequency
|
||||
/// information.
|
||||
extern char &MachineBlockPlacementStatsID;
|
||||
|
||||
/// GCLowering Pass - Performs target-independent LLVM IR transformations for
|
||||
/// highly portable strategies.
|
||||
///
|
||||
FunctionPass *createGCLoweringPass();
|
||||
|
||||
/// GCMachineCodeAnalysis - Target-independent pass to mark safe points
|
||||
/// in machine code. Must be added very late during code generation, just
|
||||
/// prior to output, and importantly after all CFG transformations (such as
|
||||
/// branch folding).
|
||||
extern char &GCMachineCodeAnalysisID;
|
||||
|
||||
/// Creates a pass to print GC metadata.
|
||||
///
|
||||
FunctionPass *createGCInfoPrinter(raw_ostream &OS);
|
||||
|
||||
/// MachineCSE - This pass performs global CSE on machine instructions.
|
||||
extern char &MachineCSEID;
|
||||
|
||||
/// MachineLICM - This pass performs LICM on machine instructions.
|
||||
extern char &MachineLICMID;
|
||||
|
||||
/// MachineSinking - This pass performs sinking on machine instructions.
|
||||
extern char &MachineSinkingID;
|
||||
|
||||
/// MachineCopyPropagation - This pass performs copy propagation on
|
||||
/// machine instructions.
|
||||
extern char &MachineCopyPropagationID;
|
||||
|
||||
/// PeepholeOptimizer - This pass performs peephole optimizations -
|
||||
/// like extension and comparison eliminations.
|
||||
extern char &PeepholeOptimizerID;
|
||||
|
||||
/// OptimizePHIs - This pass optimizes machine instruction PHIs
|
||||
/// to take advantage of opportunities created during DAG legalization.
|
||||
extern char &OptimizePHIsID;
|
||||
|
||||
/// StackSlotColoring - This pass performs stack slot coloring.
|
||||
extern char &StackSlotColoringID;
|
||||
|
||||
/// createStackProtectorPass - This pass adds stack protectors to functions.
|
||||
///
|
||||
FunctionPass *createStackProtectorPass(const TargetLoweringBase *tli);
|
||||
|
||||
/// createMachineVerifierPass - This pass verifies generated machine code
|
||||
/// instructions for correctness.
|
||||
///
|
||||
FunctionPass *createMachineVerifierPass(const char *Banner = 0);
|
||||
|
||||
/// createDwarfEHPass - This pass mulches exception handling code into a form
|
||||
/// adapted to code generation. Required if using dwarf exception handling.
|
||||
FunctionPass *createDwarfEHPass(const TargetMachine *tm);
|
||||
|
||||
/// createSjLjEHPreparePass - This pass adapts exception handling code to use
|
||||
/// the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control flow.
|
||||
///
|
||||
FunctionPass *createSjLjEHPreparePass(const TargetLoweringBase *tli);
|
||||
|
||||
/// LocalStackSlotAllocation - This pass assigns local frame indices to stack
|
||||
/// slots relative to one another and allocates base registers to access them
|
||||
/// when it is estimated by the target to be out of range of normal frame
|
||||
/// pointer or stack pointer index addressing.
|
||||
extern char &LocalStackSlotAllocationID;
|
||||
|
||||
/// ExpandISelPseudos - This pass expands pseudo-instructions.
|
||||
extern char &ExpandISelPseudosID;
|
||||
|
||||
/// createExecutionDependencyFixPass - This pass fixes execution time
|
||||
/// problems with dependent instructions, such as switching execution
|
||||
/// domains to match.
|
||||
///
|
||||
/// The pass will examine instructions using and defining registers in RC.
|
||||
///
|
||||
FunctionPass *createExecutionDependencyFixPass(const TargetRegisterClass *RC);
|
||||
|
||||
/// UnpackMachineBundles - This pass unpacks machine instruction bundles.
|
||||
extern char &UnpackMachineBundlesID;
|
||||
|
||||
/// FinalizeMachineBundles - This pass finalizes machine instruction
|
||||
/// bundles (created earlier, e.g. during pre-RA scheduling).
|
||||
extern char &FinalizeMachineBundlesID;
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
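To make the configuration hooks above concrete, here is a minimal sketch of how a target (or embedder) might adjust the standard pipeline through the public TargetPassConfig interface. configurePipeline is a hypothetical helper and the particular substitutions are purely illustrative; the member functions and pass IDs are the ones declared in this header.

// Sketch only - configurePipeline is hypothetical; the calls are declared above.
#include "llvm/CodeGen/Passes.h"

void configurePipeline(llvm::TargetPassConfig &PC) {
  using namespace llvm;
  // Skip machine-code verification between stages.
  PC.setDisableVerify(true);
  // Swap the standard early if-conversion pass for machine CSE
  // (illustrative only, not a recommended substitution).
  PC.substitutePass(&EarlyIfConverterID, IdentifyingPassPtr(&MachineCSEID));
  // Drop machine-instruction sinking from the pipeline entirely.
  PC.disablePass(&MachineSinkingID);
  // Stop code generation after register coalescing, e.g. for debugging.
  PC.setStartStopPasses(0, &RegisterCoalescerID);
}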
|
||||
108
thirdparty/clang/include/llvm/CodeGen/PseudoSourceValue.h
vendored
Normal file
@@ -0,0 +1,108 @@
//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the PseudoSourceValue class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H

#include "llvm/IR/Value.h"

namespace llvm {
class MachineFrameInfo;
class raw_ostream;

/// PseudoSourceValue - Special value supplied for machine level alias
/// analysis. It indicates that a memory access references the function's
/// stack frame (e.g., a spill slot), below the stack frame (e.g., argument
/// space), or constant pool.
class PseudoSourceValue : public Value {
private:
  /// printCustom - Implement printing for PseudoSourceValue. This is called
  /// from Value::print or Value's operator<<.
  ///
  virtual void printCustom(raw_ostream &O) const;

public:
  explicit PseudoSourceValue(enum ValueTy Subclass = PseudoSourceValueVal);

  /// isConstant - Test whether the memory pointed to by this
  /// PseudoSourceValue has a constant value.
  ///
  virtual bool isConstant(const MachineFrameInfo *) const;

  /// isAliased - Test whether the memory pointed to by this
  /// PseudoSourceValue may also be pointed to by an LLVM IR Value.
  virtual bool isAliased(const MachineFrameInfo *) const;

  /// mayAlias - Return true if the memory pointed to by this
  /// PseudoSourceValue can ever alias an LLVM IR Value.
  virtual bool mayAlias(const MachineFrameInfo *) const;

  /// classof - Methods to support type inquiry through isa, cast, and
  /// dyn_cast:
  ///
  static inline bool classof(const Value *V) {
    return V->getValueID() == PseudoSourceValueVal ||
           V->getValueID() == FixedStackPseudoSourceValueVal;
  }

  /// A pseudo source value referencing a fixed stack frame entry,
  /// e.g., a spill slot.
  static const PseudoSourceValue *getFixedStack(int FI);

  /// A pseudo source value referencing the area below the stack frame of
  /// a function, e.g., the argument space.
  static const PseudoSourceValue *getStack();

  /// A pseudo source value referencing the global offset table
  /// (or something like it).
  static const PseudoSourceValue *getGOT();

  /// A pseudo source value referencing the constant pool. Since constant
  /// pools are constant, this doesn't need to identify a specific constant
  /// pool entry.
  static const PseudoSourceValue *getConstantPool();

  /// A pseudo source value referencing a jump table. Since jump tables are
  /// constant, this doesn't need to identify a specific jump table.
  static const PseudoSourceValue *getJumpTable();
};

/// FixedStackPseudoSourceValue - A specialized PseudoSourceValue
/// for holding FixedStack values, which must include a frame
/// index.
class FixedStackPseudoSourceValue : public PseudoSourceValue {
  const int FI;
public:
  explicit FixedStackPseudoSourceValue(int fi) :
      PseudoSourceValue(FixedStackPseudoSourceValueVal), FI(fi) {}

  /// classof - Methods to support type inquiry through isa, cast, and
  /// dyn_cast:
  ///
  static inline bool classof(const Value *V) {
    return V->getValueID() == FixedStackPseudoSourceValueVal;
  }

  virtual bool isConstant(const MachineFrameInfo *MFI) const;

  virtual bool isAliased(const MachineFrameInfo *MFI) const;

  virtual bool mayAlias(const MachineFrameInfo *) const;

  virtual void printCustom(raw_ostream &OS) const;

  int getFrameIndex() const { return FI; }
};
} // End llvm namespace

#endif
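A minimal sketch (not part of the vendored header) of how client code might use the classof support above with LLVM's dyn_cast to recover a frame index from a pseudo source value; the helper function name is hypothetical.

#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static int frameIndexOf(const Value *V) {
  // dyn_cast uses FixedStackPseudoSourceValue::classof declared above.
  if (const FixedStackPseudoSourceValue *FS =
          dyn_cast<FixedStackPseudoSourceValue>(V))
    return FS->getFrameIndex();
  return -1; // not a fixed-stack reference
}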
165
thirdparty/clang/include/llvm/CodeGen/RegAllocPBQP.h
vendored
Normal file
@@ -0,0 +1,165 @@
//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
#define LLVM_CODEGEN_REGALLOCPBQP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include <map>
#include <set>

namespace llvm {

class LiveIntervals;
class MachineFunction;
class MachineLoopInfo;
class TargetRegisterInfo;
template<class T> class OwningPtr;

/// This class wraps up a PBQP instance representing a register allocation
/// problem, plus the structures necessary to map back from the PBQP solution
/// to a register allocation solution. (i.e. The PBQP-node <--> vreg map,
/// and the PBQP option <--> storage location map).
class PBQPRAProblem {
public:

  typedef SmallVector<unsigned, 16> AllowedSet;

  PBQP::Graph& getGraph() { return graph; }

  const PBQP::Graph& getGraph() const { return graph; }

  /// Record the mapping between the given virtual register and PBQP node,
  /// and the set of allowed pregs for the vreg.
  ///
  /// If you are extending
  /// PBQPBuilder you are unlikely to need this: Nodes and options for all
  /// vregs will already have been set up for you by the base class.
  template <typename AllowedRegsItr>
  void recordVReg(unsigned vreg, PBQP::Graph::NodeItr node,
                  AllowedRegsItr arBegin, AllowedRegsItr arEnd) {
    assert(node2VReg.find(node) == node2VReg.end() && "Re-mapping node.");
    assert(vreg2Node.find(vreg) == vreg2Node.end() && "Re-mapping vreg.");
    assert(allowedSets[vreg].empty() && "vreg already has pregs.");

    node2VReg[node] = vreg;
    vreg2Node[vreg] = node;
    std::copy(arBegin, arEnd, std::back_inserter(allowedSets[vreg]));
  }

  /// Get the virtual register corresponding to the given PBQP node.
  unsigned getVRegForNode(PBQP::Graph::ConstNodeItr node) const;

  /// Get the PBQP node corresponding to the given virtual register.
  PBQP::Graph::NodeItr getNodeForVReg(unsigned vreg) const;

  /// Returns true if the given PBQP option represents a physical register,
  /// false otherwise.
  bool isPRegOption(unsigned vreg, unsigned option) const {
    // At present we only have spills or pregs, so anything that's not a
    // spill is a preg. (This might be extended one day to support remat).
    return !isSpillOption(vreg, option);
  }

  /// Returns true if the given PBQP option represents spilling, false
  /// otherwise.
  bool isSpillOption(unsigned vreg, unsigned option) const {
    // We hardcode option zero as the spill option.
    return option == 0;
  }

  /// Returns the allowed set for the given virtual register.
  const AllowedSet& getAllowedSet(unsigned vreg) const;

  /// Get PReg for option.
  unsigned getPRegForOption(unsigned vreg, unsigned option) const;

private:

  typedef std::map<PBQP::Graph::ConstNodeItr, unsigned,
                   PBQP::NodeItrComparator> Node2VReg;
  typedef DenseMap<unsigned, PBQP::Graph::NodeItr> VReg2Node;
  typedef DenseMap<unsigned, AllowedSet> AllowedSetMap;

  PBQP::Graph graph;
  Node2VReg node2VReg;
  VReg2Node vreg2Node;

  AllowedSetMap allowedSets;

};

/// Builds PBQP instances to represent register allocation problems. Includes
/// spill, interference and coalescing costs by default. You can extend this
/// class to support additional constraints for your architecture.
class PBQPBuilder {
private:
  PBQPBuilder(const PBQPBuilder&) LLVM_DELETED_FUNCTION;
  void operator=(const PBQPBuilder&) LLVM_DELETED_FUNCTION;
public:

  typedef std::set<unsigned> RegSet;

  /// Default constructor.
  PBQPBuilder() {}

  /// Clean up a PBQPBuilder.
  virtual ~PBQPBuilder() {}

  /// Build a PBQP instance to represent the register allocation problem for
  /// the given MachineFunction.
  virtual PBQPRAProblem *build(MachineFunction *mf, const LiveIntervals *lis,
                               const MachineLoopInfo *loopInfo,
                               const RegSet &vregs);
private:

  void addSpillCosts(PBQP::Vector &costVec, PBQP::PBQPNum spillCost);

  void addInterferenceCosts(PBQP::Matrix &costMat,
                            const PBQPRAProblem::AllowedSet &vr1Allowed,
                            const PBQPRAProblem::AllowedSet &vr2Allowed,
                            const TargetRegisterInfo *tri);
};

/// Extended builder which adds coalescing constraints to a problem.
class PBQPBuilderWithCoalescing : public PBQPBuilder {
public:

  /// Build a PBQP instance to represent the register allocation problem for
  /// the given MachineFunction.
  virtual PBQPRAProblem *build(MachineFunction *mf, const LiveIntervals *lis,
                               const MachineLoopInfo *loopInfo,
                               const RegSet &vregs);

private:

  void addPhysRegCoalesce(PBQP::Vector &costVec, unsigned pregOption,
                          PBQP::PBQPNum benefit);

  void addVirtRegCoalesce(PBQP::Matrix &costMat,
                          const PBQPRAProblem::AllowedSet &vr1Allowed,
                          const PBQPRAProblem::AllowedSet &vr2Allowed,
                          PBQP::PBQPNum benefit);
};

FunctionPass* createPBQPRegisterAllocator(OwningPtr<PBQPBuilder> &builder,
                                          char *customPassID=0);
}

#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */
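A minimal sketch (not part of the vendored header) of the extension point described above: derive from PBQPBuilderWithCoalescing, layer extra constraints on top of the default problem, and hand the builder to createPBQPRegisterAllocator. MyPBQPBuilder and createMyPBQPAllocator are hypothetical names.

#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/RegAllocPBQP.h"
using namespace llvm;

class MyPBQPBuilder : public PBQPBuilderWithCoalescing {
public:
  virtual PBQPRAProblem *build(MachineFunction *mf, const LiveIntervals *lis,
                               const MachineLoopInfo *loopInfo,
                               const RegSet &vregs) {
    // Start from the default spill/interference/coalescing problem, then
    // adjust costs in p->getGraph() for target-specific constraints.
    PBQPRAProblem *p =
        PBQPBuilderWithCoalescing::build(mf, lis, loopInfo, vregs);
    return p;
  }
};

static FunctionPass *createMyPBQPAllocator() {
  OwningPtr<PBQPBuilder> builder(new MyPBQPBuilder());
  return createPBQPRegisterAllocator(builder);
}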
66
thirdparty/clang/include/llvm/CodeGen/RegAllocRegistry.h
vendored
Normal file
@@ -0,0 +1,66 @@
//===-- llvm/CodeGen/RegAllocRegistry.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for register allocator function
// pass registry (RegisterRegAlloc).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCREGISTRY_H
#define LLVM_CODEGEN_REGALLOCREGISTRY_H

#include "llvm/CodeGen/MachinePassRegistry.h"

namespace llvm {

//===----------------------------------------------------------------------===//
///
/// RegisterRegAlloc class - Track the registration of register allocators.
///
//===----------------------------------------------------------------------===//
class RegisterRegAlloc : public MachinePassRegistryNode {

public:

  typedef FunctionPass *(*FunctionPassCtor)();

  static MachinePassRegistry Registry;

  RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
    : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
  {
    Registry.Add(this);
  }
  ~RegisterRegAlloc() { Registry.Remove(this); }

  // Accessors.
  //
  RegisterRegAlloc *getNext() const {
    return (RegisterRegAlloc *)MachinePassRegistryNode::getNext();
  }
  static RegisterRegAlloc *getList() {
    return (RegisterRegAlloc *)Registry.getList();
  }
  static FunctionPassCtor getDefault() {
    return (FunctionPassCtor)Registry.getDefault();
  }
  static void setDefault(FunctionPassCtor C) {
    Registry.setDefault((MachinePassCtor)C);
  }
  static void setListener(MachinePassRegistryListener *L) {
    Registry.setListener(L);
  }

};

} // end namespace llvm

#endif
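A minimal sketch (not part of the vendored header) of registering an allocator with the registry above; the constructor call mirrors the declaration in the header, while createMyRegAlloc and the "myregalloc" name are hypothetical.

#include "llvm/CodeGen/RegAllocRegistry.h"
using namespace llvm;

static FunctionPass *createMyRegAlloc();   // hypothetical allocator factory

// A static RegisterRegAlloc adds the factory to the registry at load time.
static RegisterRegAlloc
  myRegAllocRegistration("myregalloc", "hypothetical register allocator",
                         createMyRegAlloc);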
133
thirdparty/clang/include/llvm/CodeGen/RegisterClassInfo.h
vendored
Normal file
@@ -0,0 +1,133 @@
//===-- RegisterClassInfo.h - Dynamic Register Class Info -*- C++ -*-------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the RegisterClassInfo class which provides dynamic
// information about target register classes. Callee saved and reserved
// registers depend on calling conventions and other dynamic information, so
// some things cannot be determined statically.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERCLASSINFO_H
#define LLVM_CODEGEN_REGISTERCLASSINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Target/TargetRegisterInfo.h"

namespace llvm {

class RegisterClassInfo {
  struct RCInfo {
    unsigned Tag;
    unsigned NumRegs;
    bool ProperSubClass;
    uint8_t MinCost;
    uint16_t LastCostChange;
    OwningArrayPtr<MCPhysReg> Order;

    RCInfo()
      : Tag(0), NumRegs(0), ProperSubClass(false), MinCost(0),
        LastCostChange(0) {}

    operator ArrayRef<MCPhysReg>() const {
      return makeArrayRef(Order.get(), NumRegs);
    }
  };

  // Brief cached information for each register class.
  OwningArrayPtr<RCInfo> RegClass;

  // Tag changes whenever cached information needs to be recomputed. An RCInfo
  // entry is valid when its tag matches.
  unsigned Tag;

  const MachineFunction *MF;
  const TargetRegisterInfo *TRI;

  // Callee saved registers of last MF. Assumed to be valid until the next
  // runOnFunction() call.
  const uint16_t *CalleeSaved;

  // Map register number to CalleeSaved index + 1.
  SmallVector<uint8_t, 4> CSRNum;

  // Reserved registers in the current MF.
  BitVector Reserved;

  // Compute all information about RC.
  void compute(const TargetRegisterClass *RC) const;

  // Return an up-to-date RCInfo for RC.
  const RCInfo &get(const TargetRegisterClass *RC) const {
    const RCInfo &RCI = RegClass[RC->getID()];
    if (Tag != RCI.Tag)
      compute(RC);
    return RCI;
  }

public:
  RegisterClassInfo();

  /// runOnFunction - Prepare to answer questions about MF. This must be called
  /// before any other methods are used.
  void runOnMachineFunction(const MachineFunction &MF);

  /// getNumAllocatableRegs - Returns the number of actually allocatable
  /// registers in RC in the current function.
  unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const {
    return get(RC).NumRegs;
  }

  /// getOrder - Returns the preferred allocation order for RC. The order
  /// contains no reserved registers, and registers that alias callee saved
  /// registers come last.
  ArrayRef<MCPhysReg> getOrder(const TargetRegisterClass *RC) const {
    return get(RC);
  }

  /// isProperSubClass - Returns true if RC has a legal super-class with more
  /// allocatable registers.
  ///
  /// Register classes like GR32_NOSP are not proper sub-classes because %esp
  /// is not allocatable. Similarly, tGPR is not a proper sub-class in Thumb
  /// mode because the GPR super-class is not legal.
  bool isProperSubClass(const TargetRegisterClass *RC) const {
    return get(RC).ProperSubClass;
  }

  /// getLastCalleeSavedAlias - Returns the last callee saved register that
  /// overlaps PhysReg, or 0 if PhysReg doesn't overlap a CSR.
  unsigned getLastCalleeSavedAlias(unsigned PhysReg) const {
    assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
    if (unsigned N = CSRNum[PhysReg])
      return CalleeSaved[N-1];
    return 0;
  }

  /// Get the minimum register cost in RC's allocation order.
  /// This is the smallest value returned by TRI->getCostPerUse(Reg) for all
  /// the registers in getOrder(RC).
  unsigned getMinCost(const TargetRegisterClass *RC) {
    return get(RC).MinCost;
  }

  /// Get the position of the last cost change in getOrder(RC).
  ///
  /// All registers in getOrder(RC).slice(getLastCostChange(RC)) will have the
  /// same cost according to TRI->getCostPerUse().
  unsigned getLastCostChange(const TargetRegisterClass *RC) {
    return get(RC).LastCostChange;
  }
};
} // end namespace llvm

#endif

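A minimal sketch (not part of the vendored header) of the documented usage: call runOnMachineFunction before any query, then ask for the dynamic allocation order. The helper function is hypothetical; only the RegisterClassInfo calls come from the header above.

#include "llvm/CodeGen/RegisterClassInfo.h"
using namespace llvm;

static unsigned countAllocatable(RegisterClassInfo &RCI,
                                 const MachineFunction &MF,
                                 const TargetRegisterClass *RC) {
  RCI.runOnMachineFunction(MF);                  // must precede other queries
  ArrayRef<MCPhysReg> Order = RCI.getOrder(RC);  // reserved regs excluded
  (void)Order;                                   // e.g. iterate in this order
  return RCI.getNumAllocatableRegs(RC);
}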
320
thirdparty/clang/include/llvm/CodeGen/RegisterPressure.h
vendored
Normal file
@@ -0,0 +1,320 @@
//===-- RegisterPressure.h - Dynamic Register Pressure -*- C++ -*-------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegisterPressure class which can be used to track
// MachineInstr level register pressure.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
#define LLVM_CODEGEN_REGISTERPRESSURE_H

#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetRegisterInfo.h"

namespace llvm {

class LiveIntervals;
class LiveInterval;
class RegisterClassInfo;
class MachineInstr;

/// Base class for register pressure results.
struct RegisterPressure {
  /// Map of max reg pressure indexed by pressure set ID, not class ID.
  std::vector<unsigned> MaxSetPressure;

  /// List of live in virtual registers or physical register units.
  SmallVector<unsigned,8> LiveInRegs;
  SmallVector<unsigned,8> LiveOutRegs;

  /// Increase register pressure for each pressure set impacted by this register
  /// class. Normally called by RegPressureTracker, but may be called manually
  /// to account for live through (global liveness).
  ///
  /// \param Reg is either a virtual register number or register unit number.
  void increase(unsigned Reg, const TargetRegisterInfo *TRI,
                const MachineRegisterInfo *MRI);

  /// Decrease register pressure for each pressure set impacted by this register
  /// class. This is only useful to account for spilling or rematerialization.
  ///
  /// \param Reg is either a virtual register number or register unit number.
  void decrease(unsigned Reg, const TargetRegisterInfo *TRI,
                const MachineRegisterInfo *MRI);

  void dump(const TargetRegisterInfo *TRI) const;
};

/// RegisterPressure computed within a region of instructions delimited by
/// TopIdx and BottomIdx. During pressure computation, the maximum pressure per
/// register pressure set is increased. Once pressure within a region is fully
/// computed, the live-in and live-out sets are recorded.
///
/// This is preferable to RegionPressure when LiveIntervals are available,
/// because delimiting regions by SlotIndex is more robust and convenient than
/// holding block iterators. The block contents can change without invalidating
/// the pressure result.
struct IntervalPressure : RegisterPressure {
  /// Record the boundary of the region being tracked.
  SlotIndex TopIdx;
  SlotIndex BottomIdx;

  void reset();

  void openTop(SlotIndex NextTop);

  void openBottom(SlotIndex PrevBottom);
};

/// RegisterPressure computed within a region of instructions delimited by
/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
/// use when LiveIntervals are unavailable.
struct RegionPressure : RegisterPressure {
  /// Record the boundary of the region being tracked.
  MachineBasicBlock::const_iterator TopPos;
  MachineBasicBlock::const_iterator BottomPos;

  void reset();

  void openTop(MachineBasicBlock::const_iterator PrevTop);

  void openBottom(MachineBasicBlock::const_iterator PrevBottom);
};

/// An element of pressure difference that identifies the pressure set and
/// amount of increase or decrease in units of pressure.
struct PressureElement {
  unsigned PSetID;
  int UnitIncrease;

  PressureElement(): PSetID(~0U), UnitIncrease(0) {}
  PressureElement(unsigned id, int inc): PSetID(id), UnitIncrease(inc) {}

  bool isValid() const { return PSetID != ~0U; }
};

/// Store the effects of a change in pressure on things that MI scheduler cares
/// about.
///
/// Excess records the value of the largest difference in register units beyond
/// the target's pressure limits across the affected pressure sets, where
/// largest is defined as the absolute value of the difference. Negative
/// ExcessUnits indicates a reduction in pressure that had already exceeded the
/// target's limits.
///
/// CriticalMax records the largest increase in the tracker's max pressure that
/// exceeds the critical limit for some pressure set determined by the client.
///
/// CurrentMax records the largest increase in the tracker's max pressure that
/// exceeds the current limit for some pressure set determined by the client.
struct RegPressureDelta {
  PressureElement Excess;
  PressureElement CriticalMax;
  PressureElement CurrentMax;

  RegPressureDelta() {}
};

/// \brief A set of live virtual registers and physical register units.
///
/// Virtual and physical register numbers require separate sparse sets, but most
/// of the RegisterPressureTracker handles them uniformly.
struct LiveRegSet {
  SparseSet<unsigned> PhysRegs;
  SparseSet<unsigned, VirtReg2IndexFunctor> VirtRegs;

  bool contains(unsigned Reg) {
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return VirtRegs.count(Reg);
    return PhysRegs.count(Reg);
  }

  bool insert(unsigned Reg) {
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return VirtRegs.insert(Reg).second;
    return PhysRegs.insert(Reg).second;
  }

  bool erase(unsigned Reg) {
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return VirtRegs.erase(Reg);
    return PhysRegs.erase(Reg);
  }
};

/// Track the current register pressure at some position in the instruction
/// stream, and remember the high water mark within the region traversed. This
/// does not automatically consider live-through ranges. The client may
/// independently adjust for global liveness.
///
/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
/// be tracked across a larger region by storing a RegisterPressure result at
/// each block boundary and explicitly adjusting pressure to account for block
/// live-in and live-out register sets.
///
/// RegPressureTracker holds a reference to a RegisterPressure result that it
/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
/// is invalid until it reaches the end of the block or closeRegion() is
/// explicitly called. Similarly, P.TopIdx is invalid during upward
/// tracking. Changing direction has the side effect of closing region, and
/// traversing past TopIdx or BottomIdx reopens it.
class RegPressureTracker {
  const MachineFunction *MF;
  const TargetRegisterInfo *TRI;
  const RegisterClassInfo *RCI;
  const MachineRegisterInfo *MRI;
  const LiveIntervals *LIS;

  /// We currently only allow pressure tracking within a block.
  const MachineBasicBlock *MBB;

  /// Track the max pressure within the region traversed so far.
  RegisterPressure &P;

  /// Run in two modes depending on whether constructed with IntervalPressure
  /// or RegionPressure. If RequireIntervals is false, LIS are ignored.
  bool RequireIntervals;

  /// Register pressure corresponds to liveness before this instruction
  /// iterator. It may point to the end of the block or a DebugValue rather than
  /// an instruction.
  MachineBasicBlock::const_iterator CurrPos;

  /// Pressure map indexed by pressure set ID, not class ID.
  std::vector<unsigned> CurrSetPressure;

  /// Set of live registers.
  LiveRegSet LiveRegs;

public:
  RegPressureTracker(IntervalPressure &rp) :
    MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(true) {}

  RegPressureTracker(RegionPressure &rp) :
    MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(false) {}

  void init(const MachineFunction *mf, const RegisterClassInfo *rci,
            const LiveIntervals *lis, const MachineBasicBlock *mbb,
            MachineBasicBlock::const_iterator pos);

  /// Force liveness of virtual registers or physical register
  /// units. Particularly useful to initialize the livein/out state of the
  /// tracker before the first call to advance/recede.
  void addLiveRegs(ArrayRef<unsigned> Regs);

  /// Get the MI position corresponding to this register pressure.
  MachineBasicBlock::const_iterator getPos() const { return CurrPos; }

  // Reset the MI position corresponding to the register pressure. This allows
  // schedulers to move instructions above the RegPressureTracker's
  // CurrPos. Since the pressure is computed before CurrPos, the iterator
  // position changes while pressure does not.
  void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }

  /// \brief Get the SlotIndex for the first nondebug instruction including or
  /// after the current position.
  SlotIndex getCurrSlot() const;

  /// Recede across the previous instruction.
  bool recede();

  /// Advance across the current instruction.
  bool advance();

  /// Finalize the region boundaries and record live-ins and live-outs.
  void closeRegion();

  /// Get the resulting register pressure over the traversed region.
  /// This result is complete if either advance() or recede() has returned true,
  /// or if closeRegion() was explicitly invoked.
  RegisterPressure &getPressure() { return P; }
  const RegisterPressure &getPressure() const { return P; }

  /// Get the register set pressure at the current position, which may be less
  /// than the pressure across the traversed region.
  std::vector<unsigned> &getRegSetPressureAtPos() { return CurrSetPressure; }

  void discoverLiveOut(unsigned Reg);
  void discoverLiveIn(unsigned Reg);

  bool isTopClosed() const;
  bool isBottomClosed() const;

  void closeTop();
  void closeBottom();

  /// Consider the pressure increase caused by traversing this instruction
  /// bottom-up. Find the pressure set with the most change beyond its pressure
  /// limit based on the tracker's current pressure, and record the number of
  /// excess register units of that pressure set introduced by this instruction.
  void getMaxUpwardPressureDelta(const MachineInstr *MI,
                                 RegPressureDelta &Delta,
                                 ArrayRef<PressureElement> CriticalPSets,
                                 ArrayRef<unsigned> MaxPressureLimit);

  /// Consider the pressure increase caused by traversing this instruction
  /// top-down. Find the pressure set with the most change beyond its pressure
  /// limit based on the tracker's current pressure, and record the number of
  /// excess register units of that pressure set introduced by this instruction.
  void getMaxDownwardPressureDelta(const MachineInstr *MI,
                                   RegPressureDelta &Delta,
                                   ArrayRef<PressureElement> CriticalPSets,
                                   ArrayRef<unsigned> MaxPressureLimit);

  /// Find the pressure set with the most change beyond its pressure limit after
  /// traversing this instruction either upward or downward depending on the
  /// closed end of the current region.
  void getMaxPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
                           ArrayRef<PressureElement> CriticalPSets,
                           ArrayRef<unsigned> MaxPressureLimit) {
    if (isTopClosed())
      return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
                                         MaxPressureLimit);

    assert(isBottomClosed() && "Uninitialized pressure tracker");
    return getMaxUpwardPressureDelta(MI, Delta, CriticalPSets,
                                     MaxPressureLimit);
  }

  /// Get the pressure of each PSet after traversing this instruction bottom-up.
  void getUpwardPressure(const MachineInstr *MI,
                         std::vector<unsigned> &PressureResult,
                         std::vector<unsigned> &MaxPressureResult);

  /// Get the pressure of each PSet after traversing this instruction top-down.
  void getDownwardPressure(const MachineInstr *MI,
                           std::vector<unsigned> &PressureResult,
                           std::vector<unsigned> &MaxPressureResult);

  void getPressureAfterInst(const MachineInstr *MI,
                            std::vector<unsigned> &PressureResult,
                            std::vector<unsigned> &MaxPressureResult) {
    if (isTopClosed())
      return getUpwardPressure(MI, PressureResult, MaxPressureResult);

    assert(isBottomClosed() && "Uninitialized pressure tracker");
    return getDownwardPressure(MI, PressureResult, MaxPressureResult);
  }

  void dump() const;

protected:
  const LiveInterval *getInterval(unsigned Reg) const;

  void increaseRegPressure(ArrayRef<unsigned> Regs);
  void decreaseRegPressure(ArrayRef<unsigned> Regs);

  void bumpUpwardPressure(const MachineInstr *MI);
  void bumpDownwardPressure(const MachineInstr *MI);
};
} // end namespace llvm

#endif
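A minimal sketch (not part of the vendored header) of bottom-up tracking over one block, using only the calls declared above. The caller is assumed to supply mf, rci, lis and mbb, and the loop condition assumes recede() reports whether it could move up; treat the return-value convention as an assumption.

#include "llvm/CodeGen/RegisterPressure.h"
using namespace llvm;

static void trackBlockPressure(const MachineFunction *mf,
                               const RegisterClassInfo *rci,
                               const LiveIntervals *lis,
                               const MachineBasicBlock *mbb) {
  IntervalPressure RP;                 // SlotIndex-delimited result
  RegPressureTracker RPTracker(RP);    // RequireIntervals == true
  RPTracker.init(mf, rci, lis, mbb, mbb->end());
  while (RPTracker.recede())           // walk upward from the block bottom
    ;
  RPTracker.closeRegion();             // record live-ins and live-outs
}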
208
thirdparty/clang/include/llvm/CodeGen/RegisterScavenging.h
vendored
Normal file
@@ -0,0 +1,208 @@
//===-- RegisterScavenging.h - Machine register scavenging ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the machine register scavenger class. It can provide
// information such as unused register at any point in a machine basic block.
// It also provides a mechanism to make registers available by evicting them
// to spill slots.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERSCAVENGING_H
#define LLVM_CODEGEN_REGISTERSCAVENGING_H

#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

namespace llvm {

class MachineRegisterInfo;
class TargetRegisterInfo;
class TargetInstrInfo;
class TargetRegisterClass;

class RegScavenger {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  MachineRegisterInfo* MRI;
  MachineBasicBlock *MBB;
  MachineBasicBlock::iterator MBBI;
  unsigned NumPhysRegs;

  /// Tracking - True if RegScavenger is currently tracking the liveness of
  /// registers.
  bool Tracking;

  /// Information on scavenged registers (held in a spill slot).
  struct ScavengedInfo {
    ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(NULL) {}

    /// A spill slot used for scavenging a register post register allocation.
    int FrameIndex;

    /// If non-zero, the specific register is currently being
    /// scavenged. That is, it is spilled to this scavenging stack slot.
    unsigned Reg;

    /// The instruction that restores the scavenged register from stack.
    const MachineInstr *Restore;
  };

  /// A vector of information on scavenged registers.
  SmallVector<ScavengedInfo, 2> Scavenged;

  /// CalleeSavedRegs - A bitvector of callee saved registers for the target.
  ///
  BitVector CalleeSavedRegs;

  /// RegsAvailable - The current state of all the physical registers immediately
  /// before MBBI. One bit per physical register. If a bit is set, the register
  /// is available; if unset, the register is currently in use.
  BitVector RegsAvailable;

  // These BitVectors are only used internally to forward(). They are members
  // to avoid frequent reallocations.
  BitVector KillRegs, DefRegs;

public:
  RegScavenger()
    : MBB(NULL), NumPhysRegs(0), Tracking(false) {}

  /// enterBasicBlock - Start tracking liveness from the beginning of the
  /// specified basic block.
  void enterBasicBlock(MachineBasicBlock *mbb);

  /// initRegState - allow resetting register state info for multiple
  /// passes over/within the same function.
  void initRegState();

  /// forward - Move the internal MBB iterator and update register states.
  void forward();

  /// forward - Move the internal MBB iterator and update register states until
  /// it has processed the specified iterator.
  void forward(MachineBasicBlock::iterator I) {
    if (!Tracking && MBB->begin() != I) forward();
    while (MBBI != I) forward();
  }

  /// Invert the behavior of forward() on the current instruction (undo the
  /// changes to the available registers made by forward()).
  void unprocess();

  /// Unprocess instructions until you reach the provided iterator.
  void unprocess(MachineBasicBlock::iterator I) {
    while (MBBI != I) unprocess();
  }

  /// skipTo - Move the internal MBB iterator but do not update register states.
  void skipTo(MachineBasicBlock::iterator I) {
    if (I == MachineBasicBlock::iterator(NULL))
      Tracking = false;
    MBBI = I;
  }

  MachineBasicBlock::iterator getCurrentPosition() const {
    return MBBI;
  }

  /// getRegsUsed - Return all registers currently in use in 'used'.
  void getRegsUsed(BitVector &used, bool includeReserved);

  /// getRegsAvailable - Return all available registers in the register class
  /// in Mask.
  BitVector getRegsAvailable(const TargetRegisterClass *RC);

  /// FindUnusedReg - Find an unused register of the specified register class.
  /// Return 0 if none is found.
  unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;

  /// Add a scavenging frame index.
  void addScavengingFrameIndex(int FI) {
    Scavenged.push_back(ScavengedInfo(FI));
  }

  /// Query whether a frame index is a scavenging frame index.
  bool isScavengingFrameIndex(int FI) const {
    for (SmallVector<ScavengedInfo, 2>::const_iterator I = Scavenged.begin(),
         IE = Scavenged.end(); I != IE; ++I)
      if (I->FrameIndex == FI)
        return true;

    return false;
  }

  /// Get an array of scavenging frame indices.
  void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
    for (SmallVector<ScavengedInfo, 2>::const_iterator I = Scavenged.begin(),
         IE = Scavenged.end(); I != IE; ++I)
      if (I->FrameIndex >= 0)
        A.push_back(I->FrameIndex);
  }

  /// scavengeRegister - Make a register of the specified register class
  /// available and do the appropriate bookkeeping. SPAdj is the stack
  /// adjustment due to call frame, it's passed along to eliminateFrameIndex().
  /// Returns the scavenged register.
  unsigned scavengeRegister(const TargetRegisterClass *RegClass,
                            MachineBasicBlock::iterator I, int SPAdj);
  unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
    return scavengeRegister(RegClass, MBBI, SPAdj);
  }

  /// setUsed - Tell the scavenger a register is used.
  ///
  void setUsed(unsigned Reg);
private:
  /// isReserved - Returns true if a register is reserved. It is never "unused".
  bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }

  /// isUsed - Test if a register is currently being used. When called by the
  /// isAliasUsed function, we only check isReserved if this is the original
  /// register, not an alias register.
  ///
  bool isUsed(unsigned Reg, bool CheckReserved = true) const {
    return !RegsAvailable.test(Reg) || (CheckReserved && isReserved(Reg));
  }

  /// isAliasUsed - Is Reg or an alias currently in use?
  bool isAliasUsed(unsigned Reg) const;

  /// setUsed / setUnused - Mark the state of one or a number of registers.
  ///
  void setUsed(BitVector &Regs) {
    RegsAvailable.reset(Regs);
  }
  void setUnused(BitVector &Regs) {
    RegsAvailable |= Regs;
  }

  /// Processes the current instruction and fill the KillRegs and DefRegs bit
  /// vectors.
  void determineKillsAndDefs();

  /// Add Reg and all its sub-registers to BV.
  void addRegWithSubRegs(BitVector &BV, unsigned Reg);

  /// findSurvivorReg - Return the candidate register that is unused for the
  /// longest after StartMI. UseMI is set to the instruction where the search
  /// stopped.
  ///
  /// No more than InstrLimit instructions are inspected.
  unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
                           BitVector &Candidates,
                           unsigned InstrLimit,
                           MachineBasicBlock::iterator &UseMI);

};

} // End llvm namespace

#endif
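A minimal sketch (not part of the vendored header) of the scavenging flow described above: enter the block, advance liveness tracking to the point of interest, then scavenge a register. The wrapper function is hypothetical; RC and MI come from the caller, and SPAdj of 0 assumes no pending call-frame adjustment.

#include "llvm/CodeGen/RegisterScavenging.h"
using namespace llvm;

static unsigned scavengeAt(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           const TargetRegisterClass *RC) {
  RegScavenger RS;
  RS.enterBasicBlock(&MBB);          // start liveness tracking at block entry
  RS.forward(MI);                    // bring register state up to MI
  return RS.scavengeRegister(RC, MI, /*SPAdj=*/0);
}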
142
thirdparty/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
vendored
Normal file
@@ -0,0 +1,142 @@
//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
#define LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H

#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"

namespace llvm {
  class ResourcePriorityQueue;

  /// Sorting functions for the Available queue.
  struct resource_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    ResourcePriorityQueue *PQ;
    explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  class ResourcePriorityQueue : public SchedulingPriorityQueue {
    /// SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    /// NumNodesSolelyBlocking - This vector contains, for every node in the
    /// Queue, the number of nodes that the node is the sole unscheduled
    /// predecessor for. This is used as a tie-breaker heuristic for better
    /// mobility.
    std::vector<unsigned> NumNodesSolelyBlocking;

    /// Queue - The queue.
    std::vector<SUnit*> Queue;

    /// RegPressure - Tracking current reg pressure per register class.
    ///
    std::vector<unsigned> RegPressure;

    /// RegLimit - Tracking the number of allocatable registers per register
    /// class.
    std::vector<unsigned> RegLimit;

    resource_sort Picker;
    const TargetRegisterInfo *TRI;
    const TargetLowering *TLI;
    const TargetInstrInfo *TII;
    const InstrItineraryData* InstrItins;
    /// ResourcesModel - Represents VLIW state.
    /// Not limited to VLIW targets per se, but assumes
    /// definition of DFA by a target.
    DFAPacketizer *ResourcesModel;

    /// Resource model - packet/bundle model. Purely
    /// internal at this time.
    std::vector<SUnit*> Packet;

    /// Heuristics for estimating register pressure.
    unsigned ParallelLiveRanges;
    signed HorizontalVerticalBalance;

  public:
    ResourcePriorityQueue(SelectionDAGISel *IS);

    ~ResourcePriorityQueue() {
      delete ResourcesModel;
    }

    bool isBottomUp() const { return false; }

    void initNodes(std::vector<SUnit> &sunits);

    void addNode(const SUnit *SU) {
      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
    }

    void updateNode(const SUnit *SU) {}

    void releaseState() {
      SUnits = 0;
    }

    unsigned getLatency(unsigned NodeNum) const {
      assert(NodeNum < (*SUnits).size());
      return (*SUnits)[NodeNum].getHeight();
    }

    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
      assert(NodeNum < NumNodesSolelyBlocking.size());
      return NumNodesSolelyBlocking[NodeNum];
    }

    /// Single cost function reflecting benefit of scheduling SU
    /// in the current cycle.
    signed SUSchedulingCost(SUnit *SU);

    /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
    ///
    void initNumRegDefsLeft(SUnit *SU);
    void updateNumRegDefsLeft(SUnit *SU);
    signed regPressureDelta(SUnit *SU, bool RawPressure = false);
    signed rawRegPressureDelta(SUnit *SU, unsigned RCId);

    bool empty() const { return Queue.empty(); }

    virtual void push(SUnit *U);

    virtual SUnit *pop();

    virtual void remove(SUnit *SU);

    virtual void dump(ScheduleDAG* DAG) const;

    /// scheduledNode - Main resource tracking point.
    void scheduledNode(SUnit *Node);
    bool isResourceAvailable(SUnit *SU);
    void reserveResources(SUnit *SU);

  private:
    void adjustPriorityOfUnscheduledPreds(SUnit *SU);
    SUnit *getSingleUnscheduledPred(SUnit *SU);
    unsigned numberRCValPredInSU(SUnit *SU, unsigned RCId);
    unsigned numberRCValSuccInSU(SUnit *SU, unsigned RCId);
  };
}

#endif
381
thirdparty/clang/include/llvm/CodeGen/RuntimeLibcalls.h
vendored
Normal file
@@ -0,0 +1,381 @@
//===-- CodeGen/RuntimeLibcall.h - Runtime Library Calls --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the enum representing the list of runtime library calls
// the backend may emit during code generation, and also some helper functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
#define LLVM_CODEGEN_RUNTIMELIBCALLS_H

#include "llvm/CodeGen/ValueTypes.h"

namespace llvm {
namespace RTLIB {
  /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
  /// the backend can emit. The various long double types cannot be merged,
  /// because 80-bit library functions use "xf" and 128-bit use "tf".
  ///
  /// When adding PPCF128 functions here, note that their names generally need
  /// to be overridden for Darwin with the xxx$LDBL128 form. See
  /// PPCISelLowering.cpp.
  ///
  enum Libcall {
    // Integer
    SHL_I16,
    SHL_I32,
    SHL_I64,
    SHL_I128,
    SRL_I16,
    SRL_I32,
    SRL_I64,
    SRL_I128,
    SRA_I16,
    SRA_I32,
    SRA_I64,
    SRA_I128,
    MUL_I8,
    MUL_I16,
    MUL_I32,
    MUL_I64,
    MUL_I128,
    MULO_I32,
    MULO_I64,
    MULO_I128,
    SDIV_I8,
    SDIV_I16,
    SDIV_I32,
    SDIV_I64,
    SDIV_I128,
    UDIV_I8,
    UDIV_I16,
    UDIV_I32,
    UDIV_I64,
    UDIV_I128,
    SREM_I8,
    SREM_I16,
    SREM_I32,
    SREM_I64,
    SREM_I128,
    UREM_I8,
    UREM_I16,
    UREM_I32,
    UREM_I64,
    UREM_I128,
    SDIVREM_I8,
    SDIVREM_I16,
    SDIVREM_I32,
    SDIVREM_I64,
    SDIVREM_I128,
    UDIVREM_I8,
    UDIVREM_I16,
    UDIVREM_I32,
    UDIVREM_I64,
    UDIVREM_I128,
    NEG_I32,
    NEG_I64,

    // FLOATING POINT
    ADD_F32,
    ADD_F64,
    ADD_F80,
    ADD_F128,
    ADD_PPCF128,
    SUB_F32,
    SUB_F64,
    SUB_F80,
    SUB_F128,
    SUB_PPCF128,
    MUL_F32,
    MUL_F64,
    MUL_F80,
    MUL_F128,
    MUL_PPCF128,
    DIV_F32,
    DIV_F64,
    DIV_F80,
    DIV_F128,
    DIV_PPCF128,
    REM_F32,
    REM_F64,
    REM_F80,
    REM_F128,
    REM_PPCF128,
    FMA_F32,
    FMA_F64,
    FMA_F80,
    FMA_F128,
    FMA_PPCF128,
    POWI_F32,
    POWI_F64,
    POWI_F80,
    POWI_F128,
    POWI_PPCF128,
    SQRT_F32,
    SQRT_F64,
    SQRT_F80,
    SQRT_F128,
    SQRT_PPCF128,
    LOG_F32,
    LOG_F64,
    LOG_F80,
    LOG_F128,
    LOG_PPCF128,
    LOG2_F32,
    LOG2_F64,
    LOG2_F80,
    LOG2_F128,
    LOG2_PPCF128,
    LOG10_F32,
    LOG10_F64,
    LOG10_F80,
    LOG10_F128,
    LOG10_PPCF128,
    EXP_F32,
    EXP_F64,
    EXP_F80,
    EXP_F128,
    EXP_PPCF128,
    EXP2_F32,
    EXP2_F64,
    EXP2_F80,
    EXP2_F128,
    EXP2_PPCF128,
    SIN_F32,
    SIN_F64,
    SIN_F80,
    SIN_F128,
    SIN_PPCF128,
    COS_F32,
    COS_F64,
    COS_F80,
    COS_F128,
    COS_PPCF128,
    SINCOS_F32,
    SINCOS_F64,
    SINCOS_F80,
    SINCOS_F128,
    SINCOS_PPCF128,
    POW_F32,
    POW_F64,
    POW_F80,
    POW_F128,
    POW_PPCF128,
    CEIL_F32,
    CEIL_F64,
    CEIL_F80,
    CEIL_F128,
    CEIL_PPCF128,
    TRUNC_F32,
    TRUNC_F64,
    TRUNC_F80,
    TRUNC_F128,
    TRUNC_PPCF128,
    RINT_F32,
    RINT_F64,
    RINT_F80,
    RINT_F128,
    RINT_PPCF128,
    NEARBYINT_F32,
    NEARBYINT_F64,
    NEARBYINT_F80,
    NEARBYINT_F128,
    NEARBYINT_PPCF128,
    FLOOR_F32,
    FLOOR_F64,
    FLOOR_F80,
    FLOOR_F128,
    FLOOR_PPCF128,
    COPYSIGN_F32,
    COPYSIGN_F64,
    COPYSIGN_F80,
    COPYSIGN_F128,
    COPYSIGN_PPCF128,

    // CONVERSION
    FPEXT_F64_F128,
    FPEXT_F32_F128,
    FPEXT_F32_F64,
    FPEXT_F16_F32,
    FPROUND_F32_F16,
    FPROUND_F64_F32,
    FPROUND_F80_F32,
    FPROUND_F128_F32,
    FPROUND_PPCF128_F32,
    FPROUND_F80_F64,
    FPROUND_F128_F64,
    FPROUND_PPCF128_F64,
    FPTOSINT_F32_I8,
    FPTOSINT_F32_I16,
    FPTOSINT_F32_I32,
    FPTOSINT_F32_I64,
    FPTOSINT_F32_I128,
    FPTOSINT_F64_I8,
    FPTOSINT_F64_I16,
    FPTOSINT_F64_I32,
    FPTOSINT_F64_I64,
    FPTOSINT_F64_I128,
    FPTOSINT_F80_I32,
    FPTOSINT_F80_I64,
    FPTOSINT_F80_I128,
    FPTOSINT_F128_I32,
    FPTOSINT_F128_I64,
    FPTOSINT_F128_I128,
    FPTOSINT_PPCF128_I32,
    FPTOSINT_PPCF128_I64,
    FPTOSINT_PPCF128_I128,
    FPTOUINT_F32_I8,
    FPTOUINT_F32_I16,
    FPTOUINT_F32_I32,
    FPTOUINT_F32_I64,
    FPTOUINT_F32_I128,
    FPTOUINT_F64_I8,
    FPTOUINT_F64_I16,
    FPTOUINT_F64_I32,
    FPTOUINT_F64_I64,
    FPTOUINT_F64_I128,
    FPTOUINT_F80_I32,
    FPTOUINT_F80_I64,
    FPTOUINT_F80_I128,
    FPTOUINT_F128_I32,
    FPTOUINT_F128_I64,
    FPTOUINT_F128_I128,
    FPTOUINT_PPCF128_I32,
    FPTOUINT_PPCF128_I64,
    FPTOUINT_PPCF128_I128,
    SINTTOFP_I32_F32,
    SINTTOFP_I32_F64,
    SINTTOFP_I32_F80,
    SINTTOFP_I32_F128,
    SINTTOFP_I32_PPCF128,
    SINTTOFP_I64_F32,
    SINTTOFP_I64_F64,
    SINTTOFP_I64_F80,
    SINTTOFP_I64_F128,
    SINTTOFP_I64_PPCF128,
    SINTTOFP_I128_F32,
    SINTTOFP_I128_F64,
    SINTTOFP_I128_F80,
    SINTTOFP_I128_F128,
    SINTTOFP_I128_PPCF128,
    UINTTOFP_I32_F32,
    UINTTOFP_I32_F64,
    UINTTOFP_I32_F80,
    UINTTOFP_I32_F128,
    UINTTOFP_I32_PPCF128,
    UINTTOFP_I64_F32,
    UINTTOFP_I64_F64,
    UINTTOFP_I64_F80,
    UINTTOFP_I64_F128,
    UINTTOFP_I64_PPCF128,
    UINTTOFP_I128_F32,
    UINTTOFP_I128_F64,
    UINTTOFP_I128_F80,
    UINTTOFP_I128_F128,
    UINTTOFP_I128_PPCF128,

    // COMPARISON
    OEQ_F32,
    OEQ_F64,
    OEQ_F128,
    UNE_F32,
    UNE_F64,
    UNE_F128,
    OGE_F32,
    OGE_F64,
    OGE_F128,
    OLT_F32,
    OLT_F64,
    OLT_F128,
    OLE_F32,
    OLE_F64,
    OLE_F128,
    OGT_F32,
    OGT_F64,
    OGT_F128,
    UO_F32,
    UO_F64,
    UO_F128,
    O_F32,
    O_F64,
    O_F128,

    // MEMORY
    MEMCPY,
    MEMSET,
    MEMMOVE,

    // EXCEPTION HANDLING
    UNWIND_RESUME,

    // Family ATOMICs
    SYNC_VAL_COMPARE_AND_SWAP_1,
    SYNC_VAL_COMPARE_AND_SWAP_2,
    SYNC_VAL_COMPARE_AND_SWAP_4,
    SYNC_VAL_COMPARE_AND_SWAP_8,
    SYNC_LOCK_TEST_AND_SET_1,
    SYNC_LOCK_TEST_AND_SET_2,
    SYNC_LOCK_TEST_AND_SET_4,
    SYNC_LOCK_TEST_AND_SET_8,
    SYNC_FETCH_AND_ADD_1,
    SYNC_FETCH_AND_ADD_2,
    SYNC_FETCH_AND_ADD_4,
    SYNC_FETCH_AND_ADD_8,
    SYNC_FETCH_AND_SUB_1,
    SYNC_FETCH_AND_SUB_2,
    SYNC_FETCH_AND_SUB_4,
    SYNC_FETCH_AND_SUB_8,
    SYNC_FETCH_AND_AND_1,
    SYNC_FETCH_AND_AND_2,
    SYNC_FETCH_AND_AND_4,
    SYNC_FETCH_AND_AND_8,
    SYNC_FETCH_AND_OR_1,
    SYNC_FETCH_AND_OR_2,
    SYNC_FETCH_AND_OR_4,
    SYNC_FETCH_AND_OR_8,
    SYNC_FETCH_AND_XOR_1,
    SYNC_FETCH_AND_XOR_2,
    SYNC_FETCH_AND_XOR_4,
    SYNC_FETCH_AND_XOR_8,
    SYNC_FETCH_AND_NAND_1,
    SYNC_FETCH_AND_NAND_2,
    SYNC_FETCH_AND_NAND_4,
    SYNC_FETCH_AND_NAND_8,

    UNKNOWN_LIBCALL
  };

  /// getFPEXT - Return the FPEXT_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPEXT(EVT OpVT, EVT RetVT);

  /// getFPROUND - Return the FPROUND_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPROUND(EVT OpVT, EVT RetVT);

  /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPTOSINT(EVT OpVT, EVT RetVT);

  /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPTOUINT(EVT OpVT, EVT RetVT);

  /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getSINTTOFP(EVT OpVT, EVT RetVT);

  /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getUINTTOFP(EVT OpVT, EVT RetVT);
}
}

#endif
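A minimal sketch (not part of the vendored header) of the helper functions declared above: mapping an f32-to-f64 extension onto its libcall enumerator. The wrapper function is hypothetical.

#include "llvm/CodeGen/RuntimeLibcalls.h"
using namespace llvm;

static RTLIB::Libcall fpExtF32ToF64() {
  // Returns RTLIB::FPEXT_F32_F64, or UNKNOWN_LIBCALL if no call exists.
  return RTLIB::getFPEXT(MVT::f32, MVT::f64);
}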
759
thirdparty/clang/include/llvm/CodeGen/ScheduleDAG.h
vendored
Normal file
759
thirdparty/clang/include/llvm/CodeGen/ScheduleDAG.h
vendored
Normal file
@@ -0,0 +1,759 @@
|
||||
//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the ScheduleDAG class, which is used as the common
|
||||
// base class for instruction schedulers. This encapsulates the scheduling DAG,
|
||||
// which is shared between SelectionDAG and MachineInstr scheduling.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
|
||||
#define LLVM_CODEGEN_SCHEDULEDAG_H
|
||||
|
||||
#include "llvm/ADT/BitVector.h"
|
||||
#include "llvm/ADT/GraphTraits.h"
|
||||
#include "llvm/ADT/PointerIntPair.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/Target/TargetLowering.h"
|
||||
|
||||
namespace llvm {
|
||||
class AliasAnalysis;
|
||||
class SUnit;
|
||||
class MachineConstantPool;
|
||||
class MachineFunction;
|
||||
class MachineRegisterInfo;
|
||||
class MachineInstr;
|
||||
struct MCSchedClassDesc;
|
||||
class TargetRegisterInfo;
|
||||
class ScheduleDAG;
|
||||
class SDNode;
|
||||
class TargetInstrInfo;
|
||||
class MCInstrDesc;
|
||||
class TargetMachine;
|
||||
class TargetRegisterClass;
|
||||
template<class Graph> class GraphWriter;
|
||||
|
||||
/// SDep - Scheduling dependency. This represents one direction of an
|
||||
/// edge in the scheduling DAG.
|
||||
class SDep {
|
||||
public:
|
||||
/// Kind - These are the different kinds of scheduling dependencies.
|
||||
enum Kind {
|
||||
Data, ///< Regular data dependence (aka true-dependence).
|
||||
Anti,   ///< A register anti-dependence (aka WAR).
|
||||
Output, ///< A register output-dependence (aka WAW).
|
||||
Order ///< Any other ordering dependency.
|
||||
};
|
||||
|
||||
// Strong dependencies must be respected by the scheduler. Artificial
|
||||
// dependencies may be removed only if they are redundant with another
|
||||
// strong dependence.
|
||||
//
|
||||
// Weak dependencies may be violated by the scheduling strategy, but only if
|
||||
// the strategy can prove it is correct to do so.
|
||||
//
|
||||
// Strong OrderKinds must occur before "Weak".
|
||||
// Weak OrderKinds must occur after "Weak".
|
||||
enum OrderKind {
|
||||
Barrier, ///< An unknown scheduling barrier.
|
||||
MayAliasMem, ///< Nonvolatile load/store instructions that may alias.
|
||||
MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
|
||||
Artificial, ///< Arbitrary strong DAG edge (no real dependence).
|
||||
Weak, ///< Arbitrary weak DAG edge.
|
||||
Cluster ///< Weak DAG edge linking a chain of clustered instrs.
|
||||
};
|
||||
|
||||
private:
|
||||
/// Dep - A pointer to the depending/depended-on SUnit, and an enum
|
||||
/// indicating the kind of the dependency.
|
||||
PointerIntPair<SUnit *, 2, Kind> Dep;
|
||||
|
||||
/// Contents - A union discriminated by the dependence kind.
|
||||
union {
|
||||
/// Reg - For Data, Anti, and Output dependencies, the associated
|
||||
/// register. For Data dependencies that don't currently have a register
|
||||
/// assigned, this is set to zero.
|
||||
unsigned Reg;
|
||||
|
||||
/// Order - Additional information about Order dependencies.
|
||||
unsigned OrdKind; // enum OrderKind
|
||||
} Contents;
|
||||
|
||||
/// Latency - The time associated with this edge. Often this is just
|
||||
/// the value of the Latency field of the predecessor, however advanced
|
||||
/// models may provide additional information about specific edges.
|
||||
unsigned Latency;
|
||||
/// Record MinLatency separately from "expected" Latency.
|
||||
///
|
||||
/// FIXME: this field is not packed on LP64. Convert to 16-bit DAG edge
|
||||
/// latency after introducing saturating truncation.
|
||||
unsigned MinLatency;
|
||||
|
||||
public:
|
||||
/// SDep - Construct a null SDep. This is only for use by container
|
||||
/// classes which require default constructors. SUnits may not
|
||||
/// have null SDep edges.
|
||||
SDep() : Dep(0, Data) {}
|
||||
|
||||
/// SDep - Construct an SDep with the specified values.
|
||||
SDep(SUnit *S, Kind kind, unsigned Reg)
|
||||
: Dep(S, kind), Contents() {
|
||||
switch (kind) {
|
||||
default:
|
||||
llvm_unreachable("Reg given for non-register dependence!");
|
||||
case Anti:
|
||||
case Output:
|
||||
assert(Reg != 0 &&
|
||||
"SDep::Anti and SDep::Output must use a non-zero Reg!");
|
||||
Contents.Reg = Reg;
|
||||
Latency = 0;
|
||||
break;
|
||||
case Data:
|
||||
Contents.Reg = Reg;
|
||||
Latency = 1;
|
||||
break;
|
||||
}
|
||||
MinLatency = Latency;
|
||||
}
|
||||
SDep(SUnit *S, OrderKind kind)
|
||||
: Dep(S, Order), Contents(), Latency(0), MinLatency(0) {
|
||||
Contents.OrdKind = kind;
|
||||
}
|
||||
|
||||
/// Return true if the specified SDep is equivalent except for latency.
|
||||
bool overlaps(const SDep &Other) const {
|
||||
if (Dep != Other.Dep) return false;
|
||||
switch (Dep.getInt()) {
|
||||
case Data:
|
||||
case Anti:
|
||||
case Output:
|
||||
return Contents.Reg == Other.Contents.Reg;
|
||||
case Order:
|
||||
return Contents.OrdKind == Other.Contents.OrdKind;
|
||||
}
|
||||
llvm_unreachable("Invalid dependency kind!");
|
||||
}
|
||||
|
||||
bool operator==(const SDep &Other) const {
|
||||
return overlaps(Other)
|
||||
&& Latency == Other.Latency && MinLatency == Other.MinLatency;
|
||||
}
|
||||
|
||||
bool operator!=(const SDep &Other) const {
|
||||
return !operator==(Other);
|
||||
}
|
||||
|
||||
/// getLatency - Return the latency value for this edge, which roughly
|
||||
/// means the minimum number of cycles that must elapse between the
|
||||
/// predecessor and the successor, given that they have this edge
|
||||
/// between them.
|
||||
unsigned getLatency() const {
|
||||
return Latency;
|
||||
}
|
||||
|
||||
/// setLatency - Set the latency for this edge.
|
||||
void setLatency(unsigned Lat) {
|
||||
Latency = Lat;
|
||||
}
|
||||
|
||||
/// getMinLatency - Return the minimum latency for this edge. Minimum
|
||||
/// latency is used for scheduling groups, while normal (expected) latency
|
||||
/// is for instruction cost and critical path.
|
||||
unsigned getMinLatency() const {
|
||||
return MinLatency;
|
||||
}
|
||||
|
||||
/// setMinLatency - Set the minimum latency for this edge.
|
||||
void setMinLatency(unsigned Lat) {
|
||||
MinLatency = Lat;
|
||||
}
|
||||
|
||||
/// getSUnit - Return the SUnit to which this edge points.
|
||||
SUnit *getSUnit() const {
|
||||
return Dep.getPointer();
|
||||
}
|
||||
|
||||
/// setSUnit - Assign the SUnit to which this edge points.
|
||||
void setSUnit(SUnit *SU) {
|
||||
Dep.setPointer(SU);
|
||||
}
|
||||
|
||||
/// getKind - Return an enum value representing the kind of the dependence.
|
||||
Kind getKind() const {
|
||||
return Dep.getInt();
|
||||
}
|
||||
|
||||
/// isCtrl - Shorthand for getKind() != SDep::Data.
|
||||
bool isCtrl() const {
|
||||
return getKind() != Data;
|
||||
}
|
||||
|
||||
/// isNormalMemory - Test if this is an Order dependence between two
|
||||
/// memory accesses where both sides of the dependence access memory
|
||||
/// in non-volatile and fully modeled ways.
|
||||
bool isNormalMemory() const {
|
||||
return getKind() == Order && (Contents.OrdKind == MayAliasMem
|
||||
|| Contents.OrdKind == MustAliasMem);
|
||||
}
|
||||
|
||||
/// isMustAlias - Test if this is an Order dependence that is marked
|
||||
/// as "must alias", meaning that the SUnits at either end of the edge
|
||||
/// have a memory dependence on a known memory location.
|
||||
bool isMustAlias() const {
|
||||
return getKind() == Order && Contents.OrdKind == MustAliasMem;
|
||||
}
|
||||
|
||||
/// isWeak - Test if this a weak dependence. Weak dependencies are
|
||||
/// considered DAG edges for height computation and other heuristics, but do
|
||||
/// not force ordering. Breaking a weak edge may require the scheduler to
|
||||
/// compensate, for example by inserting a copy.
|
||||
bool isWeak() const {
|
||||
return getKind() == Order && Contents.OrdKind >= Weak;
|
||||
}
|
||||
|
||||
/// isArtificial - Test if this is an Order dependence that is marked
|
||||
/// as "artificial", meaning it isn't necessary for correctness.
|
||||
bool isArtificial() const {
|
||||
return getKind() == Order && Contents.OrdKind == Artificial;
|
||||
}
|
||||
|
||||
/// isCluster - Test if this is an Order dependence that is marked
|
||||
/// as "cluster", meaning it is artificial and wants to be adjacent.
|
||||
bool isCluster() const {
|
||||
return getKind() == Order && Contents.OrdKind == Cluster;
|
||||
}
|
||||
|
||||
/// isAssignedRegDep - Test if this is a Data dependence that is
|
||||
/// associated with a register.
|
||||
bool isAssignedRegDep() const {
|
||||
return getKind() == Data && Contents.Reg != 0;
|
||||
}
|
||||
|
||||
/// getReg - Return the register associated with this edge. This is
|
||||
/// only valid on Data, Anti, and Output edges. On Data edges, this
|
||||
/// value may be zero, meaning there is no associated register.
|
||||
unsigned getReg() const {
|
||||
assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
|
||||
"getReg called on non-register dependence edge!");
|
||||
return Contents.Reg;
|
||||
}
|
||||
|
||||
/// setReg - Assign the associated register for this edge. This is
|
||||
/// only valid on Data, Anti, and Output edges. On Anti and Output
|
||||
/// edges, this value must not be zero. On Data edges, the value may
|
||||
/// be zero, which would mean that no specific register is associated
|
||||
/// with this edge.
|
||||
void setReg(unsigned Reg) {
|
||||
assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
|
||||
"setReg called on non-register dependence edge!");
|
||||
assert((getKind() != Anti || Reg != 0) &&
|
||||
"SDep::Anti edge cannot use the zero register!");
|
||||
assert((getKind() != Output || Reg != 0) &&
|
||||
"SDep::Output edge cannot use the zero register!");
|
||||
Contents.Reg = Reg;
|
||||
}
|
||||
};
|
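// --- Editor's note: illustrative sketch, not part of the vendored header ---
// How the two SDep constructors are used and what overlaps()/operator== mean.
// PredSU is assumed to point at a valid SUnit owned by some ScheduleDAG, and
// assert() from <cassert> is assumed available.
static void sdepEdgeExample(SUnit *PredSU) {
  SDep DataEdge(PredSU, SDep::Data, /*Reg=*/0); // data dep, latency defaults to 1
  SDep SameEdge(PredSU, SDep::Data, /*Reg=*/0);
  SameEdge.setLatency(3);                       // same edge, different latency
  assert(DataEdge.overlaps(SameEdge));          // equivalent except for latency
  assert(DataEdge != SameEdge);                 // operator== also compares latency
  SDep Barrier(PredSU, SDep::Barrier);          // ordering-only edge
  assert(Barrier.isCtrl() && !Barrier.isNormalMemory());
}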
||||
|
||||
template <>
|
||||
struct isPodLike<SDep> { static const bool value = true; };
|
||||
|
||||
/// SUnit - Scheduling unit. This is a node in the scheduling DAG.
|
||||
class SUnit {
|
||||
private:
|
||||
enum { BoundaryID = ~0u };
|
||||
|
||||
SDNode *Node; // Representative node.
|
||||
MachineInstr *Instr; // Alternatively, a MachineInstr.
|
||||
public:
|
||||
SUnit *OrigNode; // If not this, the node from which
|
||||
// this node was cloned.
|
||||
// (SD scheduling only)
|
||||
|
||||
const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
|
||||
|
||||
// Preds/Succs - The SUnits before/after us in the graph.
|
||||
SmallVector<SDep, 4> Preds; // All sunit predecessors.
|
||||
SmallVector<SDep, 4> Succs; // All sunit successors.
|
||||
|
||||
typedef SmallVector<SDep, 4>::iterator pred_iterator;
|
||||
typedef SmallVector<SDep, 4>::iterator succ_iterator;
|
||||
typedef SmallVector<SDep, 4>::const_iterator const_pred_iterator;
|
||||
typedef SmallVector<SDep, 4>::const_iterator const_succ_iterator;
|
||||
|
||||
unsigned NodeNum; // Entry # of node in the node vector.
|
||||
unsigned NodeQueueId; // Queue id of node.
|
||||
unsigned NumPreds; // # of SDep::Data preds.
|
||||
unsigned NumSuccs;                  // # of SDep::Data succs.
|
||||
unsigned NumPredsLeft; // # of preds not scheduled.
|
||||
unsigned NumSuccsLeft; // # of succs not scheduled.
|
||||
unsigned WeakPredsLeft; // # of weak preds not scheduled.
|
||||
unsigned WeakSuccsLeft; // # of weak succs not scheduled.
|
||||
unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
|
||||
unsigned short Latency; // Node latency.
|
||||
bool isVRegCycle : 1; // May use and def the same vreg.
|
||||
bool isCall : 1; // Is a function call.
|
||||
bool isCallOp : 1; // Is a function call operand.
|
||||
bool isTwoAddress : 1; // Is a two-address instruction.
|
||||
bool isCommutable : 1; // Is a commutable instruction.
|
||||
bool hasPhysRegUses : 1; // Has physreg uses.
|
||||
bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
|
||||
bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
|
||||
bool isPending : 1; // True once pending.
|
||||
bool isAvailable : 1; // True once available.
|
||||
bool isScheduled : 1; // True once scheduled.
|
||||
bool isScheduleHigh : 1; // True if preferable to schedule high.
|
||||
bool isScheduleLow : 1; // True if preferable to schedule low.
|
||||
bool isCloned : 1; // True if this node has been cloned.
|
||||
Sched::Preference SchedulingPref; // Scheduling preference.
|
||||
|
||||
private:
|
||||
bool isDepthCurrent : 1; // True if Depth is current.
|
||||
bool isHeightCurrent : 1; // True if Height is current.
|
||||
unsigned Depth; // Node depth.
|
||||
unsigned Height; // Node height.
|
||||
public:
|
||||
unsigned TopReadyCycle; // Cycle relative to start when node is ready.
|
||||
unsigned BotReadyCycle; // Cycle relative to end when node is ready.
|
||||
|
||||
const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
|
||||
const TargetRegisterClass *CopySrcRC;
|
||||
|
||||
/// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
|
||||
/// an SDNode and any nodes flagged to it.
|
||||
SUnit(SDNode *node, unsigned nodenum)
|
||||
: Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
|
||||
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
|
||||
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
|
||||
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
|
||||
isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
|
||||
hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
|
||||
isAvailable(false), isScheduled(false), isScheduleHigh(false),
|
||||
isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
|
||||
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
|
||||
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
|
||||
|
||||
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
|
||||
/// a MachineInstr.
|
||||
SUnit(MachineInstr *instr, unsigned nodenum)
|
||||
: Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
|
||||
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
|
||||
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
|
||||
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
|
||||
isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
|
||||
hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
|
||||
isAvailable(false), isScheduled(false), isScheduleHigh(false),
|
||||
isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
|
||||
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
|
||||
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
|
||||
|
||||
/// SUnit - Construct a placeholder SUnit.
|
||||
SUnit()
|
||||
: Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(BoundaryID),
|
||||
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
|
||||
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
|
||||
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
|
||||
isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
|
||||
hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
|
||||
isAvailable(false), isScheduled(false), isScheduleHigh(false),
|
||||
isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
|
||||
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
|
||||
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
|
||||
|
||||
/// \brief Boundary nodes are placeholders for the boundary of the
|
||||
/// scheduling region.
|
||||
///
|
||||
/// BoundaryNodes can have DAG edges, including Data edges, but they do not
|
||||
/// correspond to schedulable entities (e.g. instructions) and do not have a
|
||||
/// valid ID. Consequently, always check for boundary nodes before accessing
|
||||
/// an associative data structure keyed on node ID.
|
||||
bool isBoundaryNode() const { return NodeNum == BoundaryID; }
|
||||
|
||||
/// setNode - Assign the representative SDNode for this SUnit.
|
||||
/// This may be used during pre-regalloc scheduling.
|
||||
void setNode(SDNode *N) {
|
||||
assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
|
||||
Node = N;
|
||||
}
|
||||
|
||||
/// getNode - Return the representative SDNode for this SUnit.
|
||||
/// This may be used during pre-regalloc scheduling.
|
||||
SDNode *getNode() const {
|
||||
assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
|
||||
return Node;
|
||||
}
|
||||
|
||||
/// isInstr - Return true if this SUnit refers to a machine instruction as
|
||||
/// opposed to an SDNode.
|
||||
bool isInstr() const { return Instr; }
|
||||
|
||||
/// setInstr - Assign the instruction for the SUnit.
|
||||
/// This may be used during post-regalloc scheduling.
|
||||
void setInstr(MachineInstr *MI) {
|
||||
assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
|
||||
Instr = MI;
|
||||
}
|
||||
|
||||
/// getInstr - Return the representative MachineInstr for this SUnit.
|
||||
/// This may be used during post-regalloc scheduling.
|
||||
MachineInstr *getInstr() const {
|
||||
assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
|
||||
return Instr;
|
||||
}
|
||||
|
||||
/// addPred - This adds the specified edge as a pred of the current node if
|
||||
/// not already present. It also adds the current node as a successor of the
|
||||
/// specified node.
|
||||
bool addPred(const SDep &D, bool Required = true);
|
||||
|
||||
/// removePred - This removes the specified edge as a pred of the current
|
||||
/// node if it exists. It also removes the current node as a successor of
|
||||
/// the specified node.
|
||||
void removePred(const SDep &D);
|
||||
|
||||
/// getDepth - Return the depth of this node, which is the length of the
|
||||
/// maximum path up to any node which has no predecessors.
|
||||
unsigned getDepth() const {
|
||||
if (!isDepthCurrent)
|
||||
const_cast<SUnit *>(this)->ComputeDepth();
|
||||
return Depth;
|
||||
}
|
||||
|
||||
/// getHeight - Return the height of this node, which is the length of the
|
||||
/// maximum path down to any node which has no successors.
|
||||
unsigned getHeight() const {
|
||||
if (!isHeightCurrent)
|
||||
const_cast<SUnit *>(this)->ComputeHeight();
|
||||
return Height;
|
||||
}
|
||||
|
||||
/// setDepthToAtLeast - If NewDepth is greater than this node's
|
||||
/// depth value, set it to be the new depth value. This also
|
||||
/// recursively marks successor nodes dirty.
|
||||
void setDepthToAtLeast(unsigned NewDepth);
|
||||
|
||||
/// setHeightToAtLeast - If NewHeight is greater than this node's
|
||||
/// height value, set it to be the new height value. This also
|
||||
/// recursively marks predecessor nodes dirty.
|
||||
void setHeightToAtLeast(unsigned NewHeight);
|
||||
|
||||
/// setDepthDirty - Set a flag in this node to indicate that its
|
||||
/// stored Depth value will require recomputation the next time
|
||||
/// getDepth() is called.
|
||||
void setDepthDirty();
|
||||
|
||||
/// setHeightDirty - Set a flag in this node to indicate that its
|
||||
/// stored Height value will require recomputation the next time
|
||||
/// getHeight() is called.
|
||||
void setHeightDirty();
|
||||
|
||||
/// isPred - Test if node N is a predecessor of this node.
|
||||
bool isPred(SUnit *N) {
|
||||
for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
|
||||
if (Preds[i].getSUnit() == N)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/// isSucc - Test if node N is a successor of this node.
|
||||
bool isSucc(SUnit *N) {
|
||||
for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
|
||||
if (Succs[i].getSUnit() == N)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isTopReady() const {
|
||||
return NumPredsLeft == 0;
|
||||
}
|
||||
bool isBottomReady() const {
|
||||
return NumSuccsLeft == 0;
|
||||
}
|
||||
|
||||
/// \brief Order this node's predecessor edges such that the critical path
|
||||
/// edge occurs first.
|
||||
void biasCriticalPath();
|
||||
|
||||
void dump(const ScheduleDAG *G) const;
|
||||
void dumpAll(const ScheduleDAG *G) const;
|
||||
void print(raw_ostream &O, const ScheduleDAG *G) const;
|
||||
|
||||
private:
|
||||
void ComputeDepth();
|
||||
void ComputeHeight();
|
||||
};
|
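// --- Editor's note: illustrative sketch, not part of the vendored header ---
// Walking a node's incoming edges. Preds/Succs are public, so a heuristic can
// count, for example, the data predecessors and check readiness directly.
static unsigned countDataPreds(const SUnit &SU) {
  unsigned Count = 0;
  for (unsigned i = 0, e = (unsigned)SU.Preds.size(); i != e; ++i)
    if (SU.Preds[i].getKind() == SDep::Data)
      ++Count;
  return Count;
}
// A node with no unscheduled predecessors (SU.isTopReady()) can be issued from
// the top of the region; getDepth()/getHeight() are recomputed lazily.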
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
/// SchedulingPriorityQueue - This interface is used to plug different
|
||||
/// priorities computation algorithms into the list scheduler. It implements
|
||||
/// the interface of a standard priority queue, where nodes are inserted in
|
||||
/// arbitrary order and returned in priority order. The computation of the
|
||||
/// priority and the representation of the queue are totally up to the
|
||||
/// implementation to decide.
|
||||
///
|
||||
class SchedulingPriorityQueue {
|
||||
virtual void anchor();
|
||||
unsigned CurCycle;
|
||||
bool HasReadyFilter;
|
||||
public:
|
||||
SchedulingPriorityQueue(bool rf = false):
|
||||
CurCycle(0), HasReadyFilter(rf) {}
|
||||
virtual ~SchedulingPriorityQueue() {}
|
||||
|
||||
virtual bool isBottomUp() const = 0;
|
||||
|
||||
virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
|
||||
virtual void addNode(const SUnit *SU) = 0;
|
||||
virtual void updateNode(const SUnit *SU) = 0;
|
||||
virtual void releaseState() = 0;
|
||||
|
||||
virtual bool empty() const = 0;
|
||||
|
||||
bool hasReadyFilter() const { return HasReadyFilter; }
|
||||
|
||||
virtual bool tracksRegPressure() const { return false; }
|
||||
|
||||
virtual bool isReady(SUnit *) const {
|
||||
assert(!HasReadyFilter && "The ready filter must override isReady()");
|
||||
return true;
|
||||
}
|
||||
virtual void push(SUnit *U) = 0;
|
||||
|
||||
void push_all(const std::vector<SUnit *> &Nodes) {
|
||||
for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
|
||||
E = Nodes.end(); I != E; ++I)
|
||||
push(*I);
|
||||
}
|
||||
|
||||
virtual SUnit *pop() = 0;
|
||||
|
||||
virtual void remove(SUnit *SU) = 0;
|
||||
|
||||
virtual void dump(ScheduleDAG *) const {}
|
||||
|
||||
/// scheduledNode - As each node is scheduled, this method is invoked. This
|
||||
/// allows the priority function to adjust the priority of related
|
||||
/// unscheduled nodes, for example.
|
||||
///
|
||||
virtual void scheduledNode(SUnit *) {}
|
||||
|
||||
virtual void unscheduledNode(SUnit *) {}
|
||||
|
||||
void setCurCycle(unsigned Cycle) {
|
||||
CurCycle = Cycle;
|
||||
}
|
||||
|
||||
unsigned getCurCycle() const {
|
||||
return CurCycle;
|
||||
}
|
||||
};
|
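// --- Editor's note: illustrative sketch, not part of the vendored header ---
// The smallest queue that satisfies this interface: plain FIFO order instead
// of a real priority heuristic. Assumes <vector> and <algorithm> are available.
class FIFOSchedulingQueue : public SchedulingPriorityQueue {
  std::vector<SUnit*> Queue;
public:
  virtual bool isBottomUp() const { return false; }
  virtual void initNodes(std::vector<SUnit> &) {}
  virtual void addNode(const SUnit *) {}
  virtual void updateNode(const SUnit *) {}
  virtual void releaseState() {}
  virtual bool empty() const { return Queue.empty(); }
  virtual void push(SUnit *SU) { Queue.push_back(SU); }
  virtual SUnit *pop() {
    SUnit *Front = Queue.front();     // FIFO: the oldest ready node first
    Queue.erase(Queue.begin());
    return Front;
  }
  virtual void remove(SUnit *SU) {
    Queue.erase(std::find(Queue.begin(), Queue.end(), SU));
  }
};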
||||
|
||||
class ScheduleDAG {
|
||||
public:
|
||||
const TargetMachine &TM; // Target processor
|
||||
const TargetInstrInfo *TII; // Target instruction information
|
||||
const TargetRegisterInfo *TRI; // Target processor register info
|
||||
MachineFunction &MF; // Machine function
|
||||
MachineRegisterInfo &MRI; // Virtual/real register map
|
||||
std::vector<SUnit> SUnits; // The scheduling units.
|
||||
SUnit EntrySU; // Special node for the region entry.
|
||||
SUnit ExitSU; // Special node for the region exit.
|
||||
|
||||
#ifdef NDEBUG
|
||||
static const bool StressSched = false;
|
||||
#else
|
||||
bool StressSched;
|
||||
#endif
|
||||
|
||||
explicit ScheduleDAG(MachineFunction &mf);
|
||||
|
||||
virtual ~ScheduleDAG();
|
||||
|
||||
/// clearDAG - clear the DAG state (between regions).
|
||||
void clearDAG();
|
||||
|
||||
/// getInstrDesc - Return the MCInstrDesc of this SUnit.
|
||||
/// Return NULL for SDNodes without a machine opcode.
|
||||
const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
|
||||
if (SU->isInstr()) return &SU->getInstr()->getDesc();
|
||||
return getNodeDesc(SU->getNode());
|
||||
}
|
||||
|
||||
/// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
|
||||
/// using 'dot'.
|
||||
///
|
||||
virtual void viewGraph(const Twine &Name, const Twine &Title);
|
||||
virtual void viewGraph();
|
||||
|
||||
virtual void dumpNode(const SUnit *SU) const = 0;
|
||||
|
||||
/// getGraphNodeLabel - Return a label for an SUnit node in a visualization
|
||||
/// of the ScheduleDAG.
|
||||
virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
|
||||
|
||||
/// getDAGName - Return a label for the region of code covered by the DAG.
|
||||
virtual std::string getDAGName() const = 0;
|
||||
|
||||
/// addCustomGraphFeatures - Add custom features for a visualization of
|
||||
/// the ScheduleDAG.
|
||||
virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
|
||||
|
||||
#ifndef NDEBUG
|
||||
/// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
|
||||
/// their state is consistent. Return the number of scheduled SUnits.
|
||||
unsigned VerifyScheduledDAG(bool isBottomUp);
|
||||
#endif
|
||||
|
||||
private:
|
||||
// Return the MCInstrDesc of this SDNode or NULL.
|
||||
const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
|
||||
};
|
||||
|
||||
class SUnitIterator : public std::iterator<std::forward_iterator_tag,
|
||||
SUnit, ptrdiff_t> {
|
||||
SUnit *Node;
|
||||
unsigned Operand;
|
||||
|
||||
SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
|
||||
public:
|
||||
bool operator==(const SUnitIterator& x) const {
|
||||
return Operand == x.Operand;
|
||||
}
|
||||
bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
|
||||
|
||||
const SUnitIterator &operator=(const SUnitIterator &I) {
|
||||
assert(I.Node==Node && "Cannot assign iterators to two different nodes!");
|
||||
Operand = I.Operand;
|
||||
return *this;
|
||||
}
|
||||
|
||||
pointer operator*() const {
|
||||
return Node->Preds[Operand].getSUnit();
|
||||
}
|
||||
pointer operator->() const { return operator*(); }
|
||||
|
||||
SUnitIterator& operator++() { // Preincrement
|
||||
++Operand;
|
||||
return *this;
|
||||
}
|
||||
SUnitIterator operator++(int) { // Postincrement
|
||||
SUnitIterator tmp = *this; ++*this; return tmp;
|
||||
}
|
||||
|
||||
static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
|
||||
static SUnitIterator end (SUnit *N) {
|
||||
return SUnitIterator(N, (unsigned)N->Preds.size());
|
||||
}
|
||||
|
||||
unsigned getOperand() const { return Operand; }
|
||||
const SUnit *getNode() const { return Node; }
|
||||
/// isCtrlDep - Test if this is not an SDep::Data dependence.
|
||||
bool isCtrlDep() const {
|
||||
return getSDep().isCtrl();
|
||||
}
|
||||
bool isArtificialDep() const {
|
||||
return getSDep().isArtificial();
|
||||
}
|
||||
const SDep &getSDep() const {
|
||||
return Node->Preds[Operand];
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<SUnit*> {
|
||||
typedef SUnit NodeType;
|
||||
typedef SUnitIterator ChildIteratorType;
|
||||
static inline NodeType *getEntryNode(SUnit *N) { return N; }
|
||||
static inline ChildIteratorType child_begin(NodeType *N) {
|
||||
return SUnitIterator::begin(N);
|
||||
}
|
||||
static inline ChildIteratorType child_end(NodeType *N) {
|
||||
return SUnitIterator::end(N);
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
|
||||
typedef std::vector<SUnit>::iterator nodes_iterator;
|
||||
static nodes_iterator nodes_begin(ScheduleDAG *G) {
|
||||
return G->SUnits.begin();
|
||||
}
|
||||
static nodes_iterator nodes_end(ScheduleDAG *G) {
|
||||
return G->SUnits.end();
|
||||
}
|
||||
};
|
||||
|
||||
/// ScheduleDAGTopologicalSort is a class that computes a topological
|
||||
/// ordering for SUnits and provides methods for dynamically updating
|
||||
/// the ordering as new edges are added.
|
||||
///
|
||||
/// This allows a very fast implementation of IsReachable, for example.
|
||||
///
|
||||
class ScheduleDAGTopologicalSort {
|
||||
/// SUnits - A reference to the ScheduleDAG's SUnits.
|
||||
std::vector<SUnit> &SUnits;
|
||||
SUnit *ExitSU;
|
||||
|
||||
/// Index2Node - Maps topological index to the node number.
|
||||
std::vector<int> Index2Node;
|
||||
/// Node2Index - Maps the node number to its topological index.
|
||||
std::vector<int> Node2Index;
|
||||
/// Visited - a set of nodes visited during a DFS traversal.
|
||||
BitVector Visited;
|
||||
|
||||
/// DFS - make a DFS traversal and mark all nodes affected by the
|
||||
/// edge insertion. These nodes will later get new topological indexes
|
||||
/// by means of the Shift method.
|
||||
void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
|
||||
|
||||
/// Shift - reassign topological indexes for the nodes in the DAG
|
||||
/// to preserve the topological ordering.
|
||||
void Shift(BitVector& Visited, int LowerBound, int UpperBound);
|
||||
|
||||
/// Allocate - assign the topological index to the node n.
|
||||
void Allocate(int n, int index);
|
||||
|
||||
public:
|
||||
ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
|
||||
|
||||
/// InitDAGTopologicalSorting - create the initial topological
|
||||
/// ordering from the DAG to be scheduled.
|
||||
void InitDAGTopologicalSorting();
|
||||
|
||||
/// IsReachable - Checks if SU is reachable from TargetSU.
|
||||
bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
|
||||
|
||||
/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
|
||||
/// will create a cycle.
|
||||
bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
|
||||
|
||||
/// AddPred - Updates the topological ordering to accommodate an edge
|
||||
/// to be added from SUnit X to SUnit Y.
|
||||
void AddPred(SUnit *Y, SUnit *X);
|
||||
|
||||
/// RemovePred - Updates the topological ordering to accommodate an
|
||||
/// edge to be removed from the specified node N from the predecessors
|
||||
/// of the current node M.
|
||||
void RemovePred(SUnit *M, SUnit *N);
|
||||
|
||||
typedef std::vector<int>::iterator iterator;
|
||||
typedef std::vector<int>::const_iterator const_iterator;
|
||||
iterator begin() { return Index2Node.begin(); }
|
||||
const_iterator begin() const { return Index2Node.begin(); }
|
||||
iterator end() { return Index2Node.end(); }
|
||||
const_iterator end() const { return Index2Node.end(); }
|
||||
|
||||
typedef std::vector<int>::reverse_iterator reverse_iterator;
|
||||
typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
|
||||
reverse_iterator rbegin() { return Index2Node.rbegin(); }
|
||||
const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
|
||||
reverse_iterator rend() { return Index2Node.rend(); }
|
||||
const_reverse_iterator rend() const { return Index2Node.rend(); }
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
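// --- Editor's note: illustrative sketch, not part of the vendored header ---
// Keeping a ScheduleDAGTopologicalSort up to date while adding an artificial
// edge, so that edges which would create a cycle are rejected first. 'DAG' is
// assumed to have its SUnits already built.
static void addArtificialEdge(ScheduleDAGTopologicalSort &Topo,
                              SUnit *PredSU, SUnit *SuccSU) {
  if (Topo.WillCreateCycle(SuccSU, PredSU))
    return;                                   // the new edge would close a cycle
  SuccSU->addPred(SDep(PredSU, SDep::Artificial));
  Topo.AddPred(SuccSU, PredSU);               // SuccSU gains predecessor PredSU
}
// Typical setup: ScheduleDAGTopologicalSort Topo(DAG.SUnits, &DAG.ExitSU);
//                Topo.InitDAGTopologicalSorting();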
||||
247
thirdparty/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
vendored
Normal file
@@ -0,0 +1,247 @@
|
||||
//==- ScheduleDAGInstrs.h - MachineInstr Scheduling --------------*- C++ -*-==//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the ScheduleDAGInstrs class, which implements
|
||||
// scheduling for a MachineInstr-based dependency graph.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
|
||||
#define LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
|
||||
|
||||
#include "llvm/ADT/SparseSet.h"
|
||||
#include "llvm/ADT/SparseMultiSet.h"
|
||||
#include "llvm/CodeGen/ScheduleDAG.h"
|
||||
#include "llvm/CodeGen/TargetSchedule.h"
|
||||
#include "llvm/Support/Compiler.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
|
||||
namespace llvm {
|
||||
class MachineFrameInfo;
|
||||
class MachineLoopInfo;
|
||||
class MachineDominatorTree;
|
||||
class LiveIntervals;
|
||||
class RegPressureTracker;
|
||||
|
||||
/// An individual mapping from virtual register number to SUnit.
|
||||
struct VReg2SUnit {
|
||||
unsigned VirtReg;
|
||||
SUnit *SU;
|
||||
|
||||
VReg2SUnit(unsigned reg, SUnit *su): VirtReg(reg), SU(su) {}
|
||||
|
||||
unsigned getSparseSetIndex() const {
|
||||
return TargetRegisterInfo::virtReg2Index(VirtReg);
|
||||
}
|
||||
};
|
||||
|
||||
/// Record a physical register access.
|
||||
/// For non data-dependent uses, OpIdx == -1.
|
||||
struct PhysRegSUOper {
|
||||
SUnit *SU;
|
||||
int OpIdx;
|
||||
unsigned Reg;
|
||||
|
||||
PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}
|
||||
|
||||
unsigned getSparseSetIndex() const { return Reg; }
|
||||
};
|
||||
|
||||
/// Use a SparseMultiSet to track physical registers. Storage is only
|
||||
/// allocated once for the pass. It can be cleared in constant time and reused
|
||||
/// without any frees.
|
||||
typedef SparseMultiSet<PhysRegSUOper, llvm::identity<unsigned>, uint16_t> Reg2SUnitsMap;
|
||||
|
||||
/// Use SparseSet as a SparseMap by relying on the fact that it never
|
||||
/// compares ValueT's, only unsigned keys. This allows the set to be cleared
|
||||
/// between scheduling regions in constant time as long as ValueT does not
|
||||
/// require a destructor.
|
||||
typedef SparseSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMap;
|
||||
|
||||
/// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
|
||||
/// MachineInstrs.
|
||||
class ScheduleDAGInstrs : public ScheduleDAG {
|
||||
protected:
|
||||
const MachineLoopInfo &MLI;
|
||||
const MachineDominatorTree &MDT;
|
||||
const MachineFrameInfo *MFI;
|
||||
|
||||
/// Live Intervals provides reaching defs in preRA scheduling.
|
||||
LiveIntervals *LIS;
|
||||
|
||||
/// TargetSchedModel provides an interface to the machine model.
|
||||
TargetSchedModel SchedModel;
|
||||
|
||||
/// isPostRA flag indicates vregs cannot be present.
|
||||
bool IsPostRA;
|
||||
|
||||
/// UnitLatencies (misnamed) flag avoids computing def-use latencies, using
|
||||
/// the def-side latency only.
|
||||
bool UnitLatencies;
|
||||
|
||||
/// The standard DAG builder does not normally include terminators as DAG
|
||||
/// nodes because it does not create the necessary dependencies to prevent
|
||||
/// reordering. A specialized scheduler can override
|
||||
/// TargetInstrInfo::isSchedulingBoundary then enable this flag to indicate
|
||||
/// it has taken responsibility for scheduling the terminator correctly.
|
||||
bool CanHandleTerminators;
|
||||
|
||||
/// State specific to the current scheduling region.
|
||||
/// ------------------------------------------------
|
||||
|
||||
/// The block in which to insert instructions
|
||||
MachineBasicBlock *BB;
|
||||
|
||||
/// The beginning of the range to be scheduled.
|
||||
MachineBasicBlock::iterator RegionBegin;
|
||||
|
||||
/// The end of the range to be scheduled.
|
||||
MachineBasicBlock::iterator RegionEnd;
|
||||
|
||||
/// The index in BB of RegionEnd.
|
||||
unsigned EndIndex;
|
||||
|
||||
/// After calling BuildSchedGraph, each machine instruction in the current
|
||||
/// scheduling region is mapped to an SUnit.
|
||||
DenseMap<MachineInstr*, SUnit*> MISUnitMap;
|
||||
|
||||
/// State internal to DAG building.
|
||||
/// -------------------------------
|
||||
|
||||
/// Defs, Uses - Remember where defs and uses of each register are as we
|
||||
/// iterate upward through the instructions. This is allocated here instead
|
||||
/// of inside BuildSchedGraph to avoid the need for it to be initialized and
|
||||
/// destructed for each block.
|
||||
Reg2SUnitsMap Defs;
|
||||
Reg2SUnitsMap Uses;
|
||||
|
||||
/// Track the last instruction in this region defining each virtual register.
|
||||
VReg2SUnitMap VRegDefs;
|
||||
|
||||
/// PendingLoads - Remember where unknown loads are after the most recent
|
||||
/// unknown store, as we iterate. As with Defs and Uses, this is here
|
||||
/// to minimize construction/destruction.
|
||||
std::vector<SUnit *> PendingLoads;
|
||||
|
||||
/// DbgValues - Remember instruction that precedes DBG_VALUE.
|
||||
/// These are generated by buildSchedGraph but persist so they can be
|
||||
/// referenced when emitting the final schedule.
|
||||
typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
|
||||
DbgValueVector;
|
||||
DbgValueVector DbgValues;
|
||||
MachineInstr *FirstDbgValue;
|
||||
|
||||
public:
|
||||
explicit ScheduleDAGInstrs(MachineFunction &mf,
|
||||
const MachineLoopInfo &mli,
|
||||
const MachineDominatorTree &mdt,
|
||||
bool IsPostRAFlag,
|
||||
LiveIntervals *LIS = 0);
|
||||
|
||||
virtual ~ScheduleDAGInstrs() {}
|
||||
|
||||
/// \brief Get the machine model for instruction scheduling.
|
||||
const TargetSchedModel *getSchedModel() const { return &SchedModel; }
|
||||
|
||||
/// \brief Resolve and cache a resolved scheduling class for an SUnit.
|
||||
const MCSchedClassDesc *getSchedClass(SUnit *SU) const {
|
||||
if (!SU->SchedClass)
|
||||
SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr());
|
||||
return SU->SchedClass;
|
||||
}
|
||||
|
||||
/// begin - Return an iterator to the top of the current scheduling region.
|
||||
MachineBasicBlock::iterator begin() const { return RegionBegin; }
|
||||
|
||||
/// end - Return an iterator to the bottom of the current scheduling region.
|
||||
MachineBasicBlock::iterator end() const { return RegionEnd; }
|
||||
|
||||
/// newSUnit - Creates a new SUnit and returns a pointer to it.
|
||||
SUnit *newSUnit(MachineInstr *MI);
|
||||
|
||||
/// getSUnit - Return an existing SUnit for this MI, or NULL.
|
||||
SUnit *getSUnit(MachineInstr *MI) const;
|
||||
|
||||
/// startBlock - Prepare to perform scheduling in the given block.
|
||||
virtual void startBlock(MachineBasicBlock *BB);
|
||||
|
||||
/// finishBlock - Clean up after scheduling in the given block.
|
||||
virtual void finishBlock();
|
||||
|
||||
/// Initialize the scheduler state for the next scheduling region.
|
||||
virtual void enterRegion(MachineBasicBlock *bb,
|
||||
MachineBasicBlock::iterator begin,
|
||||
MachineBasicBlock::iterator end,
|
||||
unsigned endcount);
|
||||
|
||||
/// Notify that the scheduler has finished scheduling the current region.
|
||||
virtual void exitRegion();
|
||||
|
||||
/// buildSchedGraph - Build SUnits from the MachineBasicBlock that is our
|
||||
/// input.
|
||||
void buildSchedGraph(AliasAnalysis *AA, RegPressureTracker *RPTracker = 0);
|
||||
|
||||
/// addSchedBarrierDeps - Add dependencies from instructions in the current
|
||||
/// list of instructions being scheduled to the scheduling barrier. We want to
|
||||
/// make sure instructions which define registers that are either used by
|
||||
/// the terminator or are live-out are properly scheduled. This is
|
||||
/// especially important when the definition latency of the return value(s)
|
||||
/// is too high to be hidden by the branch or when the live-out registers are
|
||||
/// used by instructions in the fallthrough block.
|
||||
void addSchedBarrierDeps();
|
||||
|
||||
/// schedule - Order nodes according to selected style, filling
|
||||
/// in the Sequence member.
|
||||
///
|
||||
/// Typically, a scheduling algorithm will implement schedule() without
|
||||
/// overriding enterRegion() or exitRegion().
|
||||
virtual void schedule() = 0;
|
||||
|
||||
/// finalizeSchedule - Allow targets to perform final scheduling actions at
|
||||
/// the level of the whole MachineFunction. By default does nothing.
|
||||
virtual void finalizeSchedule() {}
|
||||
|
||||
virtual void dumpNode(const SUnit *SU) const;
|
||||
|
||||
/// Return a label for a DAG node that points to an instruction.
|
||||
virtual std::string getGraphNodeLabel(const SUnit *SU) const;
|
||||
|
||||
/// Return a label for the region of code covered by the DAG.
|
||||
virtual std::string getDAGName() const;
|
||||
|
||||
protected:
|
||||
void initSUnits();
|
||||
void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
|
||||
void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
|
||||
void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
|
||||
void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
|
||||
};
|
||||
|
||||
/// newSUnit - Creates a new SUnit and returns a pointer to it.
|
||||
inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
|
||||
#ifndef NDEBUG
|
||||
const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
|
||||
#endif
|
||||
SUnits.push_back(SUnit(MI, (unsigned)SUnits.size()));
|
||||
assert((Addr == 0 || Addr == &SUnits[0]) &&
|
||||
"SUnits std::vector reallocated on the fly!");
|
||||
SUnits.back().OrigNode = &SUnits.back();
|
||||
return &SUnits.back();
|
||||
}
|
||||
|
||||
/// getSUnit - Return an existing SUnit for this MI, or NULL.
|
||||
inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
|
||||
DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
|
||||
if (I == MISUnitMap.end())
|
||||
return 0;
|
||||
return I->second;
|
||||
}
|
||||
} // namespace llvm
|
||||
|
||||
#endif
|
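// --- Editor's note: illustrative sketch, not part of the vendored header ---
// The smallest MachineInstr scheduler one can build on ScheduleDAGInstrs: it
// constructs the dependence graph for the current region and leaves the
// instruction order untouched. The owning pass is assumed to call
// enterRegion()/exitRegion() around schedule(), as the real schedulers do.
class TrivialScheduler : public ScheduleDAGInstrs {
public:
  TrivialScheduler(MachineFunction &mf, const MachineLoopInfo &mli,
                   const MachineDominatorTree &mdt, bool IsPostRA)
    : ScheduleDAGInstrs(mf, mli, mdt, IsPostRA) {}

  virtual void schedule() {
    buildSchedGraph(0);   // no alias analysis, no register pressure tracker
    // A real scheduler would now pop SUnits from a priority queue and move
    // the corresponding MachineInstrs within [RegionBegin, RegionEnd).
  }
};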
||||
196
thirdparty/clang/include/llvm/CodeGen/ScheduleDFS.h
vendored
Normal file
@@ -0,0 +1,196 @@
|
||||
//===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Definition of an ILP metric for machine level instruction scheduling.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SCHEDULEDFS_H
|
||||
#define LLVM_CODEGEN_SCHEDULEDFS_H
|
||||
|
||||
#include "llvm/CodeGen/ScheduleDAG.h"
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class raw_ostream;
|
||||
class IntEqClasses;
|
||||
class ScheduleDAGInstrs;
|
||||
class SUnit;
|
||||
|
||||
/// \brief Represent the ILP of the subDAG rooted at a DAG node.
|
||||
///
|
||||
/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
|
||||
/// valid for all nodes regardless of their subtree membership.
|
||||
///
|
||||
/// When computed using bottom-up DFS, this metric assumes that the DAG is a
|
||||
/// forest of trees with roots at the bottom of the schedule branching upward.
|
||||
struct ILPValue {
|
||||
unsigned InstrCount;
|
||||
/// Length may either correspond to depth or height, depending on direction,
|
||||
/// and cycles or nodes depending on context.
|
||||
unsigned Length;
|
||||
|
||||
ILPValue(unsigned count, unsigned length):
|
||||
InstrCount(count), Length(length) {}
|
||||
|
||||
// Order by the ILP metric's value.
|
||||
bool operator<(ILPValue RHS) const {
|
||||
return (uint64_t)InstrCount * RHS.Length
|
||||
< (uint64_t)Length * RHS.InstrCount;
|
||||
}
|
||||
bool operator>(ILPValue RHS) const {
|
||||
return RHS < *this;
|
||||
}
|
||||
bool operator<=(ILPValue RHS) const {
|
||||
return (uint64_t)InstrCount * RHS.Length
|
||||
<= (uint64_t)Length * RHS.InstrCount;
|
||||
}
|
||||
bool operator>=(ILPValue RHS) const {
|
||||
return RHS <= *this;
|
||||
}
|
||||
|
||||
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
||||
void print(raw_ostream &OS) const;
|
||||
|
||||
void dump() const;
|
||||
#endif
|
||||
};
|
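// --- Editor's note: illustrative sketch, not part of the vendored header ---
// ILP compares the rational InstrCount/Length by cross-multiplying in 64 bits,
// so no division or floating point is needed. For example, 6 instructions over
// 2 cycles (ILP 3) ranks above 10 instructions over 5 cycles (ILP 2):
static bool ilpExample() {
  ILPValue A(6, 2), B(10, 5);
  return B < A && A >= B;   // 10*2 < 5*6, and the reverse comparison agrees
}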
||||
|
||||
/// \brief Compute the values of each DAG node for various metrics during DFS.
|
||||
class SchedDFSResult {
|
||||
friend class SchedDFSImpl;
|
||||
|
||||
static const unsigned InvalidSubtreeID = ~0u;
|
||||
|
||||
/// \brief Per-SUnit data computed during DFS for various metrics.
|
||||
///
|
||||
/// A node's SubtreeID is set to itself when it is visited to indicate that it
|
||||
/// is the root of a subtree. Later it is set to its parent to indicate an
|
||||
/// interior node. Finally, it is set to a representative subtree ID during
|
||||
/// finalization.
|
||||
struct NodeData {
|
||||
unsigned InstrCount;
|
||||
unsigned SubtreeID;
|
||||
|
||||
NodeData(): InstrCount(0), SubtreeID(InvalidSubtreeID) {}
|
||||
};
|
||||
|
||||
/// \brief Per-Subtree data computed during DFS.
|
||||
struct TreeData {
|
||||
unsigned ParentTreeID;
|
||||
unsigned SubInstrCount;
|
||||
|
||||
TreeData(): ParentTreeID(InvalidSubtreeID), SubInstrCount(0) {}
|
||||
};
|
||||
|
||||
/// \brief Record a connection between subtrees and the connection level.
|
||||
struct Connection {
|
||||
unsigned TreeID;
|
||||
unsigned Level;
|
||||
|
||||
Connection(unsigned tree, unsigned level): TreeID(tree), Level(level) {}
|
||||
};
|
||||
|
||||
bool IsBottomUp;
|
||||
unsigned SubtreeLimit;
|
||||
/// DFS results for each SUnit in this DAG.
|
||||
std::vector<NodeData> DFSNodeData;
|
||||
|
||||
// Store per-tree data indexed on tree ID.
|
||||
SmallVector<TreeData, 16> DFSTreeData;
|
||||
|
||||
// For each subtree discovered during DFS, record its connections to other
|
||||
// subtrees.
|
||||
std::vector<SmallVector<Connection, 4> > SubtreeConnections;
|
||||
|
||||
/// Cache the current connection level of each subtree.
|
||||
/// This mutable array is updated during scheduling.
|
||||
std::vector<unsigned> SubtreeConnectLevels;
|
||||
|
||||
public:
|
||||
SchedDFSResult(bool IsBU, unsigned lim)
|
||||
: IsBottomUp(IsBU), SubtreeLimit(lim) {}
|
||||
|
||||
/// \brief Get the node cutoff before subtrees are considered significant.
|
||||
unsigned getSubtreeLimit() const { return SubtreeLimit; }
|
||||
|
||||
/// \brief Return true if this DFSResult is uninitialized.
|
||||
///
|
||||
/// resize() initializes DFSResult, while compute() populates it.
|
||||
bool empty() const { return DFSNodeData.empty(); }
|
||||
|
||||
/// \brief Clear the results.
|
||||
void clear() {
|
||||
DFSNodeData.clear();
|
||||
DFSTreeData.clear();
|
||||
SubtreeConnections.clear();
|
||||
SubtreeConnectLevels.clear();
|
||||
}
|
||||
|
||||
/// \brief Initialize the result data with the size of the DAG.
|
||||
void resize(unsigned NumSUnits) {
|
||||
DFSNodeData.resize(NumSUnits);
|
||||
}
|
||||
|
||||
/// \brief Compute various metrics for the DAG with given roots.
|
||||
void compute(ArrayRef<SUnit> SUnits);
|
||||
|
||||
/// \brief Get the number of instructions in the given subtree and its
|
||||
/// children.
|
||||
unsigned getNumInstrs(const SUnit *SU) const {
|
||||
return DFSNodeData[SU->NodeNum].InstrCount;
|
||||
}
|
||||
|
||||
/// \brief Get the number of instructions in the given subtree not including
|
||||
/// children.
|
||||
unsigned getNumSubInstrs(unsigned SubtreeID) const {
|
||||
return DFSTreeData[SubtreeID].SubInstrCount;
|
||||
}
|
||||
|
||||
/// \brief Get the ILP value for a DAG node.
|
||||
///
|
||||
/// A leaf node has an ILP of 1/1.
|
||||
ILPValue getILP(const SUnit *SU) const {
|
||||
return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
|
||||
}
|
||||
|
||||
/// \brief The number of subtrees detected in this DAG.
|
||||
unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }
|
||||
|
||||
/// \brief Get the ID of the subtree the given DAG node belongs to.
|
||||
///
|
||||
/// For convenience, if DFSResults have not been computed yet, give everything
|
||||
/// tree ID 0.
|
||||
unsigned getSubtreeID(const SUnit *SU) const {
|
||||
if (empty())
|
||||
return 0;
|
||||
assert(SU->NodeNum < DFSNodeData.size() && "New Node");
|
||||
return DFSNodeData[SU->NodeNum].SubtreeID;
|
||||
}
|
||||
|
||||
/// \brief Get the connection level of a subtree.
|
||||
///
|
||||
/// For bottom-up trees, the connection level is the latency depth (in cycles)
|
||||
/// of the deepest connection to another subtree.
|
||||
unsigned getSubtreeLevel(unsigned SubtreeID) const {
|
||||
return SubtreeConnectLevels[SubtreeID];
|
||||
}
|
||||
|
||||
/// \brief Scheduler callback to update SubtreeConnectLevels when a tree is
|
||||
/// initially scheduled.
|
||||
void scheduleTree(unsigned SubtreeID);
|
||||
};
|
||||
|
||||
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);
|
||||
|
||||
} // namespace llvm
|
||||
|
||||
#endif
|
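// --- Editor's note: illustrative sketch, not part of the vendored header ---
// Computing DFS metrics for an already-built DAG and querying one node. The
// subtree limit of 8 is an arbitrary value chosen for illustration.
static void dfsExample(const std::vector<SUnit> &SUnits) {
  SchedDFSResult Result(/*IsBU=*/true, /*lim=*/8);
  Result.resize(SUnits.size());
  Result.compute(SUnits);
  const SUnit *SU = &SUnits.front();
  ILPValue ILP = Result.getILP(SU);          // InstrCount / (1 + depth)
  unsigned TreeID = Result.getSubtreeID(SU); // 0 if metrics were not computed
  (void)ILP; (void)TreeID;
}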
||||
95
thirdparty/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the ScheduleHazardRecognizer class, which implements
|
||||
// hazard-avoidance heuristics for scheduling.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
|
||||
#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class SUnit;
|
||||
|
||||
/// HazardRecognizer - This determines whether or not an instruction can be
|
||||
/// issued this cycle, and whether or not a noop needs to be inserted to handle
|
||||
/// the hazard.
|
||||
class ScheduleHazardRecognizer {
|
||||
protected:
|
||||
/// MaxLookAhead - Indicate the number of cycles in the scoreboard
|
||||
/// state. This is important for restoring the state after backtracking. Additionally,
|
||||
/// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
|
||||
/// bypass virtual calls. Currently the PostRA scheduler ignores it.
|
||||
unsigned MaxLookAhead;
|
||||
|
||||
public:
|
||||
ScheduleHazardRecognizer(): MaxLookAhead(0) {}
|
||||
virtual ~ScheduleHazardRecognizer();
|
||||
|
||||
enum HazardType {
|
||||
NoHazard, // This instruction can be emitted at this cycle.
|
||||
Hazard, // This instruction can't be emitted at this cycle.
|
||||
NoopHazard // This instruction can't be emitted, and needs noops.
|
||||
};
|
||||
|
||||
unsigned getMaxLookAhead() const { return MaxLookAhead; }
|
||||
|
||||
bool isEnabled() const { return MaxLookAhead != 0; }
|
||||
|
||||
/// atIssueLimit - Return true if no more instructions may be issued in this
|
||||
/// cycle.
|
||||
///
|
||||
/// FIXME: remove this once MachineScheduler is the only client.
|
||||
virtual bool atIssueLimit() const { return false; }
|
||||
|
||||
/// getHazardType - Return the hazard type of emitting this node. There are
|
||||
/// three possible results. Either:
|
||||
/// * NoHazard: it is legal to issue this instruction on this cycle.
|
||||
/// * Hazard: issuing this instruction would stall the machine. If some
|
||||
/// other instruction is available, issue it first.
|
||||
/// * NoopHazard: issuing this instruction would break the program. If
|
||||
/// some other instruction can be issued, do so, otherwise issue a noop.
|
||||
virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
|
||||
return NoHazard;
|
||||
}
|
||||
|
||||
/// Reset - This callback is invoked when a new block of
|
||||
/// instructions is about to be scheduled. The hazard state should be
|
||||
/// set to an initialized state.
|
||||
virtual void Reset() {}
|
||||
|
||||
/// EmitInstruction - This callback is invoked when an instruction is
|
||||
/// emitted, to advance the hazard state.
|
||||
virtual void EmitInstruction(SUnit *) {}
|
||||
|
||||
/// AdvanceCycle - This callback is invoked whenever the next top-down
|
||||
/// instruction to be scheduled cannot issue in the current cycle, either
|
||||
/// because of latency or resource conflicts. This should increment the
|
||||
/// internal state of the hazard recognizer so that previously "Hazard"
|
||||
/// instructions will now not be hazards.
|
||||
virtual void AdvanceCycle() {}
|
||||
|
||||
/// RecedeCycle - This callback is invoked whenever the next bottom-up
|
||||
/// instruction to be scheduled cannot issue in the current cycle, either
|
||||
/// because of latency or resource conflicts.
|
||||
virtual void RecedeCycle() {}
|
||||
|
||||
/// EmitNoop - This callback is invoked when a noop was added to the
|
||||
/// instruction stream.
|
||||
virtual void EmitNoop() {
|
||||
// Default implementation: count it as a cycle.
|
||||
AdvanceCycle();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
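// --- Editor's note: illustrative sketch, not part of the vendored header ---
// How a top-down scheduler might consult a hazard recognizer before issuing a
// candidate SUnit. issueNode() is a hypothetical callback, not an LLVM API.
static void tryIssue(ScheduleHazardRecognizer &HazardRec, SUnit *SU,
                     void (*issueNode)(SUnit *)) {
  switch (HazardRec.getHazardType(SU)) {
  case ScheduleHazardRecognizer::NoHazard:
    issueNode(SU);
    HazardRec.EmitInstruction(SU);     // advance the recognizer's state
    break;
  case ScheduleHazardRecognizer::Hazard:
    HazardRec.AdvanceCycle();          // stall: try this candidate next cycle
    break;
  case ScheduleHazardRecognizer::NoopHazard:
    HazardRec.EmitNoop();              // default implementation counts a cycle
    break;
  }
}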
||||
112
thirdparty/clang/include/llvm/CodeGen/SchedulerRegistry.h
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
//===-- llvm/CodeGen/SchedulerRegistry.h ------------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file contains the implementation for instruction scheduler function
|
||||
// pass registry (RegisterScheduler).
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
|
||||
#define LLVM_CODEGEN_SCHEDULERREGISTRY_H
|
||||
|
||||
#include "llvm/CodeGen/MachinePassRegistry.h"
|
||||
#include "llvm/Target/TargetMachine.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// RegisterScheduler class - Track the registration of instruction schedulers.
|
||||
///
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
class SelectionDAGISel;
|
||||
class ScheduleDAGSDNodes;
|
||||
class SelectionDAG;
|
||||
class MachineBasicBlock;
|
||||
|
||||
class RegisterScheduler : public MachinePassRegistryNode {
|
||||
public:
|
||||
typedef ScheduleDAGSDNodes *(*FunctionPassCtor)(SelectionDAGISel*,
|
||||
CodeGenOpt::Level);
|
||||
|
||||
static MachinePassRegistry Registry;
|
||||
|
||||
RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
|
||||
: MachinePassRegistryNode(N, D, (MachinePassCtor)C)
|
||||
{ Registry.Add(this); }
|
||||
~RegisterScheduler() { Registry.Remove(this); }
|
||||
|
||||
|
||||
// Accessors.
|
||||
//
|
||||
RegisterScheduler *getNext() const {
|
||||
return (RegisterScheduler *)MachinePassRegistryNode::getNext();
|
||||
}
|
||||
static RegisterScheduler *getList() {
|
||||
return (RegisterScheduler *)Registry.getList();
|
||||
}
|
||||
static FunctionPassCtor getDefault() {
|
||||
return (FunctionPassCtor)Registry.getDefault();
|
||||
}
|
||||
static void setDefault(FunctionPassCtor C) {
|
||||
Registry.setDefault((MachinePassCtor)C);
|
||||
}
|
||||
static void setListener(MachinePassRegistryListener *L) {
|
||||
Registry.setListener(L);
|
||||
}
|
||||
};
|
||||
|
||||
/// createBURRListDAGScheduler - This creates a bottom up register usage
|
||||
/// reduction list scheduler.
|
||||
ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level OptLevel);
|
||||
|
||||
/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
|
||||
/// schedules nodes in source code order when possible.
|
||||
ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level OptLevel);
|
||||
|
||||
/// createHybridListDAGScheduler - This creates a bottom up register pressure
|
||||
/// aware list scheduler that makes use of latency information to avoid stalls
|
||||
/// for long latency instructions in low register pressure mode. In high
|
||||
/// register pressure mode it schedules to reduce register pressure.
|
||||
ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level);
|
||||
|
||||
/// createILPListDAGScheduler - This creates a bottom up register pressure
|
||||
/// aware list scheduler that tries to increase instruction level parallelism
|
||||
/// in low register pressure mode. In high register pressure mode it schedules
|
||||
/// to reduce register pressure.
|
||||
ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level);
|
||||
|
||||
/// createFastDAGScheduler - This creates a "fast" scheduler.
|
||||
///
|
||||
ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level OptLevel);
|
||||
|
||||
/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates a top-down,
|
||||
/// DFA-driven list scheduler with a clustering heuristic to control
|
||||
/// register pressure.
|
||||
ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level OptLevel);
|
||||
/// createDefaultScheduler - This creates an instruction scheduler appropriate
|
||||
/// for the target.
|
||||
ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level OptLevel);
|
||||
|
||||
/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
|
||||
/// linearize the DAG using topological order.
|
||||
ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
|
||||
CodeGenOpt::Level OptLevel);
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
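For orientation, a creator function is normally hooked into this registry by constructing a static RegisterScheduler object; the sketch below is illustrative only, and the name createMyListScheduler and the "my-sched" option string are hypothetical.

static ScheduleDAGSDNodes *createMyListScheduler(SelectionDAGISel *IS,
                                                 CodeGenOpt::Level OptLevel);

// Constructing the registration object adds the creator to the registry, so
// it can later be selected (for example via -pre-RA-sched=my-sched).
static RegisterScheduler
  myListSchedulerRegistration("my-sched", "Hypothetical list scheduler",
                              createMyListScheduler);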
126
thirdparty/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
vendored
Normal file
@@ -0,0 +1,126 @@
//=- llvm/CodeGen/ScoreboardHazardRecognizer.h - Schedule Support -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ScoreboardHazardRecognizer class, which
// encapsulates hazard-avoidance heuristics for scheduling, based on the
// scheduling itineraries specified for the target.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
#define LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H

#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstring>

namespace llvm {

class InstrItineraryData;
class ScheduleDAG;
class SUnit;

class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
  // Scoreboard to track function unit usage. Scoreboard[0] is a
  // mask of the FUs in use in the cycle currently being
  // scheduled. Scoreboard[1] is a mask for the next cycle. The
  // Scoreboard is used as a circular buffer with the current cycle
  // indicated by Head.
  //
  // Scoreboard always counts cycles in forward execution order. If used by a
  // bottom-up scheduler, then the scoreboard cycles are the inverse of the
  // scheduler's cycles.
  class Scoreboard {
    unsigned *Data;

    // The maximum number of cycles monitored by the Scoreboard. This
    // value is determined based on the target itineraries to ensure
    // that all hazards can be tracked.
    size_t Depth;
    // Index into the Scoreboard that represents the current cycle.
    size_t Head;
  public:
    Scoreboard():Data(NULL), Depth(0), Head(0) { }
    ~Scoreboard() {
      delete[] Data;
    }

    size_t getDepth() const { return Depth; }
    unsigned& operator[](size_t idx) const {
      // Depth is expected to be a power-of-2.
      assert(Depth && !(Depth & (Depth - 1)) &&
             "Scoreboard was not initialized properly!");

      return Data[(Head + idx) & (Depth-1)];
    }

    void reset(size_t d = 1) {
      if (Data == NULL) {
        Depth = d;
        Data = new unsigned[Depth];
      }

      memset(Data, 0, Depth * sizeof(Data[0]));
      Head = 0;
    }

    void advance() {
      Head = (Head + 1) & (Depth-1);
    }

    void recede() {
      Head = (Head - 1) & (Depth-1);
    }

    // Print the scoreboard.
    void dump() const;
  };

#ifndef NDEBUG
  // Support for tracing ScoreboardHazardRecognizer as a component within
  // another module. Follows the current thread-unsafe model of tracing.
  static const char *DebugType;
#endif

  // Itinerary data for the target.
  const InstrItineraryData *ItinData;

  const ScheduleDAG *DAG;

  /// IssueWidth - Max issue per cycle. 0=Unknown.
  unsigned IssueWidth;

  /// IssueCount - Count instructions issued in this cycle.
  unsigned IssueCount;

  Scoreboard ReservedScoreboard;
  Scoreboard RequiredScoreboard;

public:
  ScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
                             const ScheduleDAG *DAG,
                             const char *ParentDebugType = "");

  /// atIssueLimit - Return true if no more instructions may be issued in this
  /// cycle.
  virtual bool atIssueLimit() const;

  // Stalls provides a cycle offset at which SU will be scheduled. It will be
  // negative for bottom-up scheduling.
  virtual HazardType getHazardType(SUnit *SU, int Stalls);
  virtual void Reset();
  virtual void EmitInstruction(SUnit *SU);
  virtual void AdvanceCycle();
  virtual void RecedeCycle();
};

}

#endif //!LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
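A scheduler drives this recognizer through the getHazardType/EmitInstruction/AdvanceCycle protocol declared above. A minimal sketch of such a driver loop follows; HR and the Ready queue are assumed context from the surrounding scheduler, not part of this header.

// Assumed: ScoreboardHazardRecognizer HR; std::deque<SUnit*> Ready;
while (!Ready.empty()) {
  SUnit *SU = Ready.front();
  if (HR.getHazardType(SU, 0) == ScheduleHazardRecognizer::NoHazard) {
    HR.EmitInstruction(SU);   // occupy the function units SU needs this cycle
    Ready.pop_front();
  } else {
    HR.AdvanceCycle();        // stall one cycle until the hazard clears
  }
}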
1109
thirdparty/clang/include/llvm/CodeGen/SelectionDAG.h
vendored
Normal file
File diff suppressed because it is too large
291
thirdparty/clang/include/llvm/CodeGen/SelectionDAGISel.h
vendored
Normal file
@@ -0,0 +1,291 @@
//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the SelectionDAGISel class, which is used as the common
|
||||
// base class for SelectionDAG-based instruction selectors.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SELECTIONDAGISEL_H
|
||||
#define LLVM_CODEGEN_SELECTIONDAGISEL_H
|
||||
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/SelectionDAG.h"
|
||||
#include "llvm/IR/BasicBlock.h"
|
||||
#include "llvm/Pass.h"
|
||||
|
||||
namespace llvm {
|
||||
class FastISel;
|
||||
class SelectionDAGBuilder;
|
||||
class SDValue;
|
||||
class MachineRegisterInfo;
|
||||
class MachineBasicBlock;
|
||||
class MachineFunction;
|
||||
class MachineInstr;
|
||||
class TargetLowering;
|
||||
class TargetLibraryInfo;
|
||||
class TargetInstrInfo;
|
||||
class TargetTransformInfo;
|
||||
class FunctionLoweringInfo;
|
||||
class ScheduleHazardRecognizer;
|
||||
class GCFunctionInfo;
|
||||
class ScheduleDAGSDNodes;
|
||||
class LoadInst;
|
||||
|
||||
/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
|
||||
/// pattern-matching instruction selectors.
|
||||
class SelectionDAGISel : public MachineFunctionPass {
|
||||
public:
|
||||
const TargetMachine &TM;
|
||||
const TargetLowering &TLI;
|
||||
const TargetLibraryInfo *LibInfo;
|
||||
const TargetTransformInfo *TTI;
|
||||
FunctionLoweringInfo *FuncInfo;
|
||||
MachineFunction *MF;
|
||||
MachineRegisterInfo *RegInfo;
|
||||
SelectionDAG *CurDAG;
|
||||
SelectionDAGBuilder *SDB;
|
||||
AliasAnalysis *AA;
|
||||
GCFunctionInfo *GFI;
|
||||
CodeGenOpt::Level OptLevel;
|
||||
static char ID;
|
||||
|
||||
explicit SelectionDAGISel(const TargetMachine &tm,
|
||||
CodeGenOpt::Level OL = CodeGenOpt::Default);
|
||||
virtual ~SelectionDAGISel();
|
||||
|
||||
const TargetLowering &getTargetLowering() { return TLI; }
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
|
||||
|
||||
virtual bool runOnMachineFunction(MachineFunction &MF);
|
||||
|
||||
virtual void EmitFunctionEntryCode() {}
|
||||
|
||||
/// PreprocessISelDAG - This hook allows targets to hack on the graph before
|
||||
/// instruction selection starts.
|
||||
virtual void PreprocessISelDAG() {}
|
||||
|
||||
/// PostprocessISelDAG() - This hook allows the target to hack on the graph
|
||||
/// right after selection.
|
||||
virtual void PostprocessISelDAG() {}
|
||||
|
||||
/// Select - Main hook targets implement to select a node.
|
||||
virtual SDNode *Select(SDNode *N) = 0;
|
||||
|
||||
/// SelectInlineAsmMemoryOperand - Select the specified address as a target
|
||||
/// addressing mode, according to the specified constraint code. If this does
|
||||
/// not match or is not implemented, return true. The resultant operands
|
||||
/// (which will appear in the machine instruction) should be added to the
|
||||
/// OutOps vector.
|
||||
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
|
||||
char ConstraintCode,
|
||||
std::vector<SDValue> &OutOps) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// IsProfitableToFold - Returns true if it's profitable to fold the specific
|
||||
/// operand node N of U during instruction selection that starts at Root.
|
||||
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
|
||||
|
||||
/// IsLegalToFold - Returns true if the specific operand node N of
|
||||
/// U can be folded during instruction selection that starts at Root.
|
||||
/// FIXME: This is a static member function because the MSP430/X86
/// targets use it during isel. This could become a proper member.
|
||||
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
|
||||
CodeGenOpt::Level OptLevel,
|
||||
bool IgnoreChains = false);
|
||||
|
||||
// Opcodes used by the DAG state machine:
|
||||
enum BuiltinOpcodes {
|
||||
OPC_Scope,
|
||||
OPC_RecordNode,
|
||||
OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
|
||||
OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
|
||||
OPC_RecordMemRef,
|
||||
OPC_CaptureGlueInput,
|
||||
OPC_MoveChild,
|
||||
OPC_MoveParent,
|
||||
OPC_CheckSame,
|
||||
OPC_CheckPatternPredicate,
|
||||
OPC_CheckPredicate,
|
||||
OPC_CheckOpcode,
|
||||
OPC_SwitchOpcode,
|
||||
OPC_CheckType,
|
||||
OPC_SwitchType,
|
||||
OPC_CheckChild0Type, OPC_CheckChild1Type, OPC_CheckChild2Type,
|
||||
OPC_CheckChild3Type, OPC_CheckChild4Type, OPC_CheckChild5Type,
|
||||
OPC_CheckChild6Type, OPC_CheckChild7Type,
|
||||
OPC_CheckInteger,
|
||||
OPC_CheckCondCode,
|
||||
OPC_CheckValueType,
|
||||
OPC_CheckComplexPat,
|
||||
OPC_CheckAndImm, OPC_CheckOrImm,
|
||||
OPC_CheckFoldableChainNode,
|
||||
|
||||
OPC_EmitInteger,
|
||||
OPC_EmitRegister,
|
||||
OPC_EmitRegister2,
|
||||
OPC_EmitConvertToTarget,
|
||||
OPC_EmitMergeInputChains,
|
||||
OPC_EmitMergeInputChains1_0,
|
||||
OPC_EmitMergeInputChains1_1,
|
||||
OPC_EmitCopyToReg,
|
||||
OPC_EmitNodeXForm,
|
||||
OPC_EmitNode,
|
||||
OPC_MorphNodeTo,
|
||||
OPC_MarkGlueResults,
|
||||
OPC_CompleteMatch
|
||||
};
|
||||
|
||||
enum {
|
||||
OPFL_None = 0, // Node has no chain or glue input and isn't variadic.
|
||||
OPFL_Chain = 1, // Node has a chain input.
|
||||
OPFL_GlueInput = 2, // Node has a glue input.
|
||||
OPFL_GlueOutput = 4, // Node has a glue output.
|
||||
OPFL_MemRefs = 8, // Node gets accumulated MemRefs.
|
||||
OPFL_Variadic0 = 1<<4, // Node is variadic, root has 0 fixed inputs.
|
||||
OPFL_Variadic1 = 2<<4, // Node is variadic, root has 1 fixed input.
|
||||
OPFL_Variadic2 = 3<<4, // Node is variadic, root has 2 fixed inputs.
|
||||
OPFL_Variadic3 = 4<<4, // Node is variadic, root has 3 fixed inputs.
|
||||
OPFL_Variadic4 = 5<<4, // Node is variadic, root has 4 fixed inputs.
|
||||
OPFL_Variadic5 = 6<<4, // Node is variadic, root has 5 fixed inputs.
|
||||
OPFL_Variadic6 = 7<<4, // Node is variadic, root has 6 fixed inputs.
|
||||
|
||||
OPFL_VariadicInfo = OPFL_Variadic6
|
||||
};
|
||||
|
||||
/// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
|
||||
/// number of fixed arity values that should be skipped when copying from the
|
||||
/// root.
|
||||
static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
|
||||
return ((Flags&OPFL_VariadicInfo) >> 4)-1;
|
||||
}
|
||||
|
||||
|
||||
protected:
|
||||
/// DAGSize - Size of DAG being instruction selected.
|
||||
///
|
||||
unsigned DAGSize;
|
||||
|
||||
/// ReplaceUses - replace all uses of the old node F with the use
|
||||
/// of the new node T.
|
||||
void ReplaceUses(SDValue F, SDValue T) {
|
||||
CurDAG->ReplaceAllUsesOfValueWith(F, T);
|
||||
}
|
||||
|
||||
/// ReplaceUses - replace all uses of the old nodes F with the use
|
||||
/// of the new nodes T.
|
||||
void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
|
||||
CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
|
||||
}
|
||||
|
||||
/// ReplaceUses - replace all uses of the old node F with the use
|
||||
/// of the new node T.
|
||||
void ReplaceUses(SDNode *F, SDNode *T) {
|
||||
CurDAG->ReplaceAllUsesWith(F, T);
|
||||
}
|
||||
|
||||
|
||||
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
|
||||
/// by tblgen. Others should not call it.
|
||||
void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops);
|
||||
|
||||
|
||||
public:
|
||||
// Calls to these predicates are generated by tblgen.
|
||||
bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
|
||||
int64_t DesiredMaskS) const;
|
||||
bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
|
||||
int64_t DesiredMaskS) const;
|
||||
|
||||
|
||||
/// CheckPatternPredicate - This function is generated by tblgen in the
|
||||
/// target. It runs the specified pattern predicate and returns true if it
|
||||
/// succeeds or false if it fails. The number is a private implementation
|
||||
/// detail to the code tblgen produces.
|
||||
virtual bool CheckPatternPredicate(unsigned PredNo) const {
|
||||
llvm_unreachable("Tblgen should generate the implementation of this!");
|
||||
}
|
||||
|
||||
/// CheckNodePredicate - This function is generated by tblgen in the target.
|
||||
/// It runs node predicate number PredNo and returns true if it succeeds or
|
||||
/// false if it fails. The number is a private implementation
|
||||
/// detail to the code tblgen produces.
|
||||
virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
|
||||
llvm_unreachable("Tblgen should generate the implementation of this!");
|
||||
}
|
||||
|
||||
virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
|
||||
unsigned PatternNo,
|
||||
SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
|
||||
llvm_unreachable("Tblgen should generate the implementation of this!");
|
||||
}
|
||||
|
||||
virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
|
||||
llvm_unreachable("Tblgen should generate this!");
|
||||
}
|
||||
|
||||
SDNode *SelectCodeCommon(SDNode *NodeToMatch,
|
||||
const unsigned char *MatcherTable,
|
||||
unsigned TableSize);
|
||||
|
||||
private:
|
||||
|
||||
// Calls to these functions are generated by tblgen.
|
||||
SDNode *Select_INLINEASM(SDNode *N);
|
||||
SDNode *Select_UNDEF(SDNode *N);
|
||||
void CannotYetSelect(SDNode *N);
|
||||
|
||||
private:
|
||||
void DoInstructionSelection();
|
||||
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
|
||||
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
|
||||
|
||||
void PrepareEHLandingPad();
|
||||
|
||||
/// \brief Perform instruction selection on all basic blocks in the function.
|
||||
void SelectAllBasicBlocks(const Function &Fn);
|
||||
|
||||
/// \brief Perform instruction selection on a single basic block, for
|
||||
/// instructions between \p Begin and \p End. \p HadTailCall will be set
|
||||
/// to true if a call in the block was translated as a tail call.
|
||||
void SelectBasicBlock(BasicBlock::const_iterator Begin,
|
||||
BasicBlock::const_iterator End,
|
||||
bool &HadTailCall);
|
||||
void FinishBasicBlock();
|
||||
|
||||
void CodeGenAndEmitDAG();
|
||||
|
||||
/// \brief Generate instructions for lowering the incoming arguments of the
|
||||
/// given function.
|
||||
void LowerArguments(const Function &F);
|
||||
|
||||
void ComputeLiveOutVRegInfo();
|
||||
|
||||
/// Create the scheduler. If a specific scheduler was specified
|
||||
/// via the SchedulerRegistry, use it, otherwise select the
|
||||
/// one preferred by the target.
|
||||
///
|
||||
ScheduleDAGSDNodes *CreateScheduler();
|
||||
|
||||
/// OpcodeOffset - This is a cache used to dispatch efficiently into isel
|
||||
/// state machines that start with a OPC_SwitchOpcode node.
|
||||
std::vector<unsigned> OpcodeOffset;
|
||||
|
||||
void UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
|
||||
const SmallVectorImpl<SDNode*> &ChainNodesMatched,
|
||||
SDValue InputGlue, const SmallVectorImpl<SDNode*> &F,
|
||||
bool isMorphNodeTo);
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif /* LLVM_CODEGEN_SELECTIONDAGISEL_H */
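As a usage sketch only: a backend typically derives from SelectionDAGISel and implements Select(), dispatching into the tblgen-generated matcher. The class and constructor arguments below are hypothetical, and SelectCode is assumed to come from the target's tblgen-generated *GenDAGISel.inc.

class MyTargetDAGToDAGISel : public SelectionDAGISel {
public:
  explicit MyTargetDAGToDAGISel(TargetMachine &TM, CodeGenOpt::Level OL)
    : SelectionDAGISel(TM, OL) {}

  virtual SDNode *Select(SDNode *N) {
    // Hand the node to the generated state-machine matcher (assumption:
    // SelectCode is emitted by tblgen and included into this class).
    return SelectCode(N);
  }
};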
1865
thirdparty/clang/include/llvm/CodeGen/SelectionDAGNodes.h
vendored
Normal file
File diff suppressed because it is too large
703
thirdparty/clang/include/llvm/CodeGen/SlotIndexes.h
vendored
Normal file
@@ -0,0 +1,703 @@
//===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements SlotIndex and related classes. The purpose of SlotIndex
|
||||
// is to describe a position at which a register can become live, or cease to
|
||||
// be live.
|
||||
//
|
||||
// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
|
||||
// is held by LiveIntervals and provides the real numbering. This allows
|
||||
// LiveIntervals to perform largely transparent renumbering.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SLOTINDEXES_H
|
||||
#define LLVM_CODEGEN_SLOTINDEXES_H
|
||||
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/IntervalMap.h"
|
||||
#include "llvm/ADT/PointerIntPair.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/ADT/ilist.h"
|
||||
#include "llvm/CodeGen/MachineFunction.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/MachineInstrBundle.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// This class represents an entry in the slot index list held in the
|
||||
/// SlotIndexes pass. It should not be used directly. See the
|
||||
/// SlotIndex & SlotIndexes classes for the public interface to this
|
||||
/// information.
|
||||
class IndexListEntry : public ilist_node<IndexListEntry> {
|
||||
MachineInstr *mi;
|
||||
unsigned index;
|
||||
|
||||
public:
|
||||
|
||||
IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}
|
||||
|
||||
MachineInstr* getInstr() const { return mi; }
|
||||
void setInstr(MachineInstr *mi) {
|
||||
this->mi = mi;
|
||||
}
|
||||
|
||||
unsigned getIndex() const { return index; }
|
||||
void setIndex(unsigned index) {
|
||||
this->index = index;
|
||||
}
|
||||
|
||||
#ifdef EXPENSIVE_CHECKS
|
||||
// When EXPENSIVE_CHECKS is defined, "erased" index list entries will
|
||||
// actually be moved to a "graveyard" list, and have their pointers
|
||||
// poisoned, so that dangling SlotIndex access can be reliably detected.
|
||||
void setPoison() {
|
||||
intptr_t tmp = reinterpret_cast<intptr_t>(mi);
|
||||
assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
|
||||
tmp |= 0x1;
|
||||
mi = reinterpret_cast<MachineInstr*>(tmp);
|
||||
}
|
||||
|
||||
bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
|
||||
#endif // EXPENSIVE_CHECKS
|
||||
|
||||
};
|
||||
|
||||
template <>
|
||||
struct ilist_traits<IndexListEntry> : public ilist_default_traits<IndexListEntry> {
|
||||
private:
|
||||
mutable ilist_half_node<IndexListEntry> Sentinel;
|
||||
public:
|
||||
IndexListEntry *createSentinel() const {
|
||||
return static_cast<IndexListEntry*>(&Sentinel);
|
||||
}
|
||||
void destroySentinel(IndexListEntry *) const {}
|
||||
|
||||
IndexListEntry *provideInitialHead() const { return createSentinel(); }
|
||||
IndexListEntry *ensureHead(IndexListEntry*) const { return createSentinel(); }
|
||||
static void noteHead(IndexListEntry*, IndexListEntry*) {}
|
||||
void deleteNode(IndexListEntry *N) {}
|
||||
|
||||
private:
|
||||
void createNode(const IndexListEntry &);
|
||||
};
|
||||
|
||||
/// SlotIndex - An opaque wrapper around machine indexes.
|
||||
class SlotIndex {
|
||||
friend class SlotIndexes;
|
||||
|
||||
enum Slot {
|
||||
/// Basic block boundary. Used for live ranges entering and leaving a
|
||||
/// block without being live in the layout neighbor. Also used as the
|
||||
/// def slot of PHI-defs.
|
||||
Slot_Block,
|
||||
|
||||
/// Early-clobber register use/def slot. A live range defined at
|
||||
/// Slot_EarlyClobber interferes with normal live ranges killed at
|
||||
/// Slot_Register. Also used as the kill slot for live ranges tied to an
|
||||
/// early-clobber def.
|
||||
Slot_EarlyClobber,
|
||||
|
||||
/// Normal register use/def slot. Normal instructions kill and define
|
||||
/// register live ranges at this slot.
|
||||
Slot_Register,
|
||||
|
||||
/// Dead def kill point. Kill slot for a live range that is defined by
|
||||
/// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
|
||||
/// used anywhere.
|
||||
Slot_Dead,
|
||||
|
||||
Slot_Count
|
||||
};
|
||||
|
||||
PointerIntPair<IndexListEntry*, 2, unsigned> lie;
|
||||
|
||||
SlotIndex(IndexListEntry *entry, unsigned slot)
|
||||
: lie(entry, slot) {}
|
||||
|
||||
IndexListEntry* listEntry() const {
|
||||
assert(isValid() && "Attempt to compare reserved index.");
|
||||
#ifdef EXPENSIVE_CHECKS
|
||||
assert(!lie.getPointer()->isPoisoned() &&
|
||||
"Attempt to access deleted list-entry.");
|
||||
#endif // EXPENSIVE_CHECKS
|
||||
return lie.getPointer();
|
||||
}
|
||||
|
||||
unsigned getIndex() const {
|
||||
return listEntry()->getIndex() | getSlot();
|
||||
}
|
||||
|
||||
/// Returns the slot for this SlotIndex.
|
||||
Slot getSlot() const {
|
||||
return static_cast<Slot>(lie.getInt());
|
||||
}
|
||||
|
||||
public:
|
||||
enum {
|
||||
/// The default distance between instructions as returned by distance().
|
||||
/// This may vary as instructions are inserted and removed.
|
||||
InstrDist = 4 * Slot_Count
|
||||
};
|
||||
|
||||
/// Construct an invalid index.
|
||||
SlotIndex() : lie(0, 0) {}
|
||||
|
||||
// Construct a new slot index from the given one, and set the slot.
|
||||
SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
|
||||
assert(lie.getPointer() != 0 &&
|
||||
"Attempt to construct index with 0 pointer.");
|
||||
}
|
||||
|
||||
/// Returns true if this is a valid index. Invalid indicies do
|
||||
/// not point into an index table, and cannot be compared.
|
||||
bool isValid() const {
|
||||
return lie.getPointer();
|
||||
}
|
||||
|
||||
/// Return true for a valid index.
|
||||
operator bool() const { return isValid(); }
|
||||
|
||||
/// Print this index to the given raw_ostream.
|
||||
void print(raw_ostream &os) const;
|
||||
|
||||
/// Dump this index to stderr.
|
||||
void dump() const;
|
||||
|
||||
/// Compare two SlotIndex objects for equality.
|
||||
bool operator==(SlotIndex other) const {
|
||||
return lie == other.lie;
|
||||
}
|
||||
/// Compare two SlotIndex objects for inequality.
|
||||
bool operator!=(SlotIndex other) const {
|
||||
return lie != other.lie;
|
||||
}
|
||||
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is strictly lower than the second.
|
||||
bool operator<(SlotIndex other) const {
|
||||
return getIndex() < other.getIndex();
|
||||
}
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is lower than, or equal to, the second.
|
||||
bool operator<=(SlotIndex other) const {
|
||||
return getIndex() <= other.getIndex();
|
||||
}
|
||||
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is greater than the second.
|
||||
bool operator>(SlotIndex other) const {
|
||||
return getIndex() > other.getIndex();
|
||||
}
|
||||
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is greater than, or equal to, the second.
|
||||
bool operator>=(SlotIndex other) const {
|
||||
return getIndex() >= other.getIndex();
|
||||
}
|
||||
|
||||
/// isSameInstr - Return true if A and B refer to the same instruction.
|
||||
static bool isSameInstr(SlotIndex A, SlotIndex B) {
|
||||
return A.lie.getPointer() == B.lie.getPointer();
|
||||
}
|
||||
|
||||
/// isEarlierInstr - Return true if A refers to an instruction earlier than
|
||||
/// B. This is equivalent to A < B && !isSameInstr(A, B).
|
||||
static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
|
||||
return A.listEntry()->getIndex() < B.listEntry()->getIndex();
|
||||
}
|
||||
|
||||
/// Return the distance from this index to the given one.
|
||||
int distance(SlotIndex other) const {
|
||||
return other.getIndex() - getIndex();
|
||||
}
|
||||
|
||||
/// isBlock - Returns true if this is a block boundary slot.
|
||||
bool isBlock() const { return getSlot() == Slot_Block; }
|
||||
|
||||
/// isEarlyClobber - Returns true if this is an early-clobber slot.
|
||||
bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }
|
||||
|
||||
/// isRegister - Returns true if this is a normal register use/def slot.
|
||||
/// Note that early-clobber slots may also be used for uses and defs.
|
||||
bool isRegister() const { return getSlot() == Slot_Register; }
|
||||
|
||||
/// isDead - Returns true if this is a dead def kill slot.
|
||||
bool isDead() const { return getSlot() == Slot_Dead; }
|
||||
|
||||
/// Returns the base index associated with this index. The base index
|
||||
/// is the one associated with the Slot_Block slot for the instruction
|
||||
/// pointed to by this index.
|
||||
SlotIndex getBaseIndex() const {
|
||||
return SlotIndex(listEntry(), Slot_Block);
|
||||
}
|
||||
|
||||
/// Returns the boundary index associated with this index. The boundary
/// index is the one associated with the Slot_Dead slot for the instruction
|
||||
/// pointed to by this index.
|
||||
SlotIndex getBoundaryIndex() const {
|
||||
return SlotIndex(listEntry(), Slot_Dead);
|
||||
}
|
||||
|
||||
/// Returns the register use/def slot in the current instruction for a
|
||||
/// normal or early-clobber def.
|
||||
SlotIndex getRegSlot(bool EC = false) const {
|
||||
return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
|
||||
}
|
||||
|
||||
/// Returns the dead def kill slot for the current instruction.
|
||||
SlotIndex getDeadSlot() const {
|
||||
return SlotIndex(listEntry(), Slot_Dead);
|
||||
}
|
||||
|
||||
/// Returns the next slot in the index list. This could be either the
|
||||
/// next slot for the instruction pointed to by this index or, if this
|
||||
/// index is a Slot_Dead, the first slot for the next instruction.
|
||||
/// WARNING: This method is considerably more expensive than the methods
|
||||
/// that return specific slots (getUseIndex(), etc). If you can - please
|
||||
/// use one of those methods.
|
||||
SlotIndex getNextSlot() const {
|
||||
Slot s = getSlot();
|
||||
if (s == Slot_Dead) {
|
||||
return SlotIndex(listEntry()->getNextNode(), Slot_Block);
|
||||
}
|
||||
return SlotIndex(listEntry(), s + 1);
|
||||
}
|
||||
|
||||
/// Returns the next index. This is the index corresponding to the this
|
||||
/// index's slot, but for the next instruction.
|
||||
SlotIndex getNextIndex() const {
|
||||
return SlotIndex(listEntry()->getNextNode(), getSlot());
|
||||
}
|
||||
|
||||
/// Returns the previous slot in the index list. This could be either the
|
||||
/// previous slot for the instruction pointed to by this index or, if this
|
||||
/// index is a Slot_Block, the last slot for the previous instruction.
|
||||
/// WARNING: This method is considerably more expensive than the methods
|
||||
/// that return specific slots (getUseIndex(), etc). If you can - please
|
||||
/// use one of those methods.
|
||||
SlotIndex getPrevSlot() const {
|
||||
Slot s = getSlot();
|
||||
if (s == Slot_Block) {
|
||||
return SlotIndex(listEntry()->getPrevNode(), Slot_Dead);
|
||||
}
|
||||
return SlotIndex(listEntry(), s - 1);
|
||||
}
|
||||
|
||||
/// Returns the previous index. This is the index corresponding to this
|
||||
/// index's slot, but for the previous instruction.
|
||||
SlotIndex getPrevIndex() const {
|
||||
return SlotIndex(listEntry()->getPrevNode(), getSlot());
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
template <> struct isPodLike<SlotIndex> { static const bool value = true; };
|
||||
|
||||
inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
|
||||
li.print(os);
|
||||
return os;
|
||||
}
|
||||
|
||||
typedef std::pair<SlotIndex, MachineBasicBlock*> IdxMBBPair;
|
||||
|
||||
inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
|
||||
return V < IM.first;
|
||||
}
|
||||
|
||||
inline bool operator<(const IdxMBBPair &IM, SlotIndex V) {
|
||||
return IM.first < V;
|
||||
}
|
||||
|
||||
struct Idx2MBBCompare {
|
||||
bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
|
||||
return LHS.first < RHS.first;
|
||||
}
|
||||
};
|
||||
|
||||
/// SlotIndexes pass.
|
||||
///
|
||||
/// This pass assigns indexes to each instruction.
|
||||
class SlotIndexes : public MachineFunctionPass {
|
||||
private:
|
||||
|
||||
typedef ilist<IndexListEntry> IndexList;
|
||||
IndexList indexList;
|
||||
|
||||
#ifdef EXPENSIVE_CHECKS
|
||||
IndexList graveyardList;
|
||||
#endif // EXPENSIVE_CHECKS
|
||||
|
||||
MachineFunction *mf;
|
||||
|
||||
typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
|
||||
Mi2IndexMap mi2iMap;
|
||||
|
||||
/// MBBRanges - Map MBB number to (start, stop) indexes.
|
||||
SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges;
|
||||
|
||||
/// Idx2MBBMap - Sorted list of pairs of index of first instruction
|
||||
/// and MBB id.
|
||||
SmallVector<IdxMBBPair, 8> idx2MBBMap;
|
||||
|
||||
// IndexListEntry allocator.
|
||||
BumpPtrAllocator ileAllocator;
|
||||
|
||||
IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
|
||||
IndexListEntry *entry =
|
||||
static_cast<IndexListEntry*>(
|
||||
ileAllocator.Allocate(sizeof(IndexListEntry),
|
||||
alignOf<IndexListEntry>()));
|
||||
|
||||
new (entry) IndexListEntry(mi, index);
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
/// Renumber locally after inserting curItr.
|
||||
void renumberIndexes(IndexList::iterator curItr);
|
||||
|
||||
public:
|
||||
static char ID;
|
||||
|
||||
SlotIndexes() : MachineFunctionPass(ID) {
|
||||
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
|
||||
}
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &au) const;
|
||||
virtual void releaseMemory();
|
||||
|
||||
virtual bool runOnMachineFunction(MachineFunction &fn);
|
||||
|
||||
/// Dump the indexes.
|
||||
void dump() const;
|
||||
|
||||
/// Renumber the index list, providing space for new instructions.
|
||||
void renumberIndexes();
|
||||
|
||||
/// Repair indexes after adding and removing instructions.
|
||||
void repairIndexesInRange(MachineBasicBlock *MBB,
|
||||
MachineBasicBlock::iterator Begin,
|
||||
MachineBasicBlock::iterator End);
|
||||
|
||||
/// Returns the zero index for this analysis.
|
||||
SlotIndex getZeroIndex() {
|
||||
assert(indexList.front().getIndex() == 0 && "First index is not 0?");
|
||||
return SlotIndex(&indexList.front(), 0);
|
||||
}
|
||||
|
||||
/// Returns the base index of the last slot in this analysis.
|
||||
SlotIndex getLastIndex() {
|
||||
return SlotIndex(&indexList.back(), 0);
|
||||
}
|
||||
|
||||
/// Returns true if the given machine instr is mapped to an index,
|
||||
/// otherwise returns false.
|
||||
bool hasIndex(const MachineInstr *instr) const {
|
||||
return mi2iMap.count(instr);
|
||||
}
|
||||
|
||||
/// Returns the base index for the given instruction.
|
||||
SlotIndex getInstructionIndex(const MachineInstr *MI) const {
|
||||
// Instructions inside a bundle have the same number as the bundle itself.
|
||||
Mi2IndexMap::const_iterator itr = mi2iMap.find(getBundleStart(MI));
|
||||
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
|
||||
return itr->second;
|
||||
}
|
||||
|
||||
/// Returns the instruction for the given index, or null if the given
|
||||
/// index has no instruction associated with it.
|
||||
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
|
||||
return index.isValid() ? index.listEntry()->getInstr() : 0;
|
||||
}
|
||||
|
||||
/// Returns the next non-null index, if one exists.
|
||||
/// Otherwise returns getLastIndex().
|
||||
SlotIndex getNextNonNullIndex(SlotIndex Index) {
|
||||
IndexList::iterator I = Index.listEntry();
|
||||
IndexList::iterator E = indexList.end();
|
||||
while (++I != E)
|
||||
if (I->getInstr())
|
||||
return SlotIndex(I, Index.getSlot());
|
||||
// We reached the end of the function.
|
||||
return getLastIndex();
|
||||
}
|
||||
|
||||
/// getIndexBefore - Returns the index of the last indexed instruction
|
||||
/// before MI, or the start index of its basic block.
|
||||
/// MI is not required to have an index.
|
||||
SlotIndex getIndexBefore(const MachineInstr *MI) const {
|
||||
const MachineBasicBlock *MBB = MI->getParent();
|
||||
assert(MBB && "MI must be inserted inna basic block");
|
||||
MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
|
||||
for (;;) {
|
||||
if (I == B)
|
||||
return getMBBStartIdx(MBB);
|
||||
--I;
|
||||
Mi2IndexMap::const_iterator MapItr = mi2iMap.find(I);
|
||||
if (MapItr != mi2iMap.end())
|
||||
return MapItr->second;
|
||||
}
|
||||
}
|
||||
|
||||
/// getIndexAfter - Returns the index of the first indexed instruction
|
||||
/// after MI, or the end index of its basic block.
|
||||
/// MI is not required to have an index.
|
||||
SlotIndex getIndexAfter(const MachineInstr *MI) const {
|
||||
const MachineBasicBlock *MBB = MI->getParent();
|
||||
assert(MBB && "MI must be inserted inna basic block");
|
||||
MachineBasicBlock::const_iterator I = MI, E = MBB->end();
|
||||
for (;;) {
|
||||
++I;
|
||||
if (I == E)
|
||||
return getMBBEndIdx(MBB);
|
||||
Mi2IndexMap::const_iterator MapItr = mi2iMap.find(I);
|
||||
if (MapItr != mi2iMap.end())
|
||||
return MapItr->second;
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the (start,end) range of the given basic block number.
|
||||
const std::pair<SlotIndex, SlotIndex> &
|
||||
getMBBRange(unsigned Num) const {
|
||||
return MBBRanges[Num];
|
||||
}
|
||||
|
||||
/// Return the (start,end) range of the given basic block.
|
||||
const std::pair<SlotIndex, SlotIndex> &
|
||||
getMBBRange(const MachineBasicBlock *MBB) const {
|
||||
return getMBBRange(MBB->getNumber());
|
||||
}
|
||||
|
||||
/// Returns the first index in the given basic block number.
|
||||
SlotIndex getMBBStartIdx(unsigned Num) const {
|
||||
return getMBBRange(Num).first;
|
||||
}
|
||||
|
||||
/// Returns the first index in the given basic block.
|
||||
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
|
||||
return getMBBRange(mbb).first;
|
||||
}
|
||||
|
||||
/// Returns the last index in the given basic block number.
|
||||
SlotIndex getMBBEndIdx(unsigned Num) const {
|
||||
return getMBBRange(Num).second;
|
||||
}
|
||||
|
||||
/// Returns the last index in the given basic block.
|
||||
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
|
||||
return getMBBRange(mbb).second;
|
||||
}
|
||||
|
||||
/// Returns the basic block which the given index falls in.
|
||||
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
|
||||
if (MachineInstr *MI = getInstructionFromIndex(index))
|
||||
return MI->getParent();
|
||||
SmallVectorImpl<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), index);
|
||||
// Take the pair containing the index
|
||||
SmallVectorImpl<IdxMBBPair>::const_iterator J =
|
||||
((I != idx2MBBMap.end() && I->first > index) ||
|
||||
(I == idx2MBBMap.end() && idx2MBBMap.size()>0)) ? (I-1): I;
|
||||
|
||||
assert(J != idx2MBBMap.end() && J->first <= index &&
|
||||
index < getMBBEndIdx(J->second) &&
|
||||
"index does not correspond to an MBB");
|
||||
return J->second;
|
||||
}
|
||||
|
||||
bool findLiveInMBBs(SlotIndex start, SlotIndex end,
|
||||
SmallVectorImpl<MachineBasicBlock*> &mbbs) const {
|
||||
SmallVectorImpl<IdxMBBPair>::const_iterator itr =
|
||||
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
|
||||
bool resVal = false;
|
||||
|
||||
while (itr != idx2MBBMap.end()) {
|
||||
if (itr->first >= end)
|
||||
break;
|
||||
mbbs.push_back(itr->second);
|
||||
resVal = true;
|
||||
++itr;
|
||||
}
|
||||
return resVal;
|
||||
}
|
||||
|
||||
/// Returns the MBB covering the given range, or null if the range covers
|
||||
/// more than one basic block.
|
||||
MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {
|
||||
|
||||
assert(start < end && "Backwards ranges not allowed.");
|
||||
|
||||
SmallVectorImpl<IdxMBBPair>::const_iterator itr =
|
||||
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
|
||||
|
||||
if (itr == idx2MBBMap.end()) {
|
||||
itr = prior(itr);
|
||||
return itr->second;
|
||||
}
|
||||
|
||||
// Check that we don't cross the boundary into this block.
|
||||
if (itr->first < end)
|
||||
return 0;
|
||||
|
||||
itr = prior(itr);
|
||||
|
||||
if (itr->first <= start)
|
||||
return itr->second;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Insert the given machine instruction into the mapping. Returns the
|
||||
/// assigned index.
|
||||
/// If Late is set and there are null indexes between mi's neighboring
|
||||
/// instructions, create the new index after the null indexes instead of
|
||||
/// before them.
|
||||
SlotIndex insertMachineInstrInMaps(MachineInstr *mi, bool Late = false) {
|
||||
assert(!mi->isInsideBundle() &&
|
||||
"Instructions inside bundles should use bundle start's slot.");
|
||||
assert(mi2iMap.find(mi) == mi2iMap.end() && "Instr already indexed.");
|
||||
// Numbering DBG_VALUE instructions could cause code generation to be
|
||||
// affected by debug information.
|
||||
assert(!mi->isDebugValue() && "Cannot number DBG_VALUE instructions.");
|
||||
|
||||
assert(mi->getParent() != 0 && "Instr must be added to function.");
|
||||
|
||||
// Get the entries where mi should be inserted.
|
||||
IndexList::iterator prevItr, nextItr;
|
||||
if (Late) {
|
||||
// Insert mi's index immediately before the following instruction.
|
||||
nextItr = getIndexAfter(mi).listEntry();
|
||||
prevItr = prior(nextItr);
|
||||
} else {
|
||||
// Insert mi's index immediately after the preceding instruction.
|
||||
prevItr = getIndexBefore(mi).listEntry();
|
||||
nextItr = llvm::next(prevItr);
|
||||
}
|
||||
|
||||
// Get a number for the new instr, or 0 if there's no room currently.
|
||||
// In the latter case we'll force a renumber later.
|
||||
unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
|
||||
unsigned newNumber = prevItr->getIndex() + dist;
|
||||
|
||||
// Insert a new list entry for mi.
|
||||
IndexList::iterator newItr =
|
||||
indexList.insert(nextItr, createEntry(mi, newNumber));
|
||||
|
||||
// Renumber locally if we need to.
|
||||
if (dist == 0)
|
||||
renumberIndexes(newItr);
|
||||
|
||||
SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
|
||||
mi2iMap.insert(std::make_pair(mi, newIndex));
|
||||
return newIndex;
|
||||
}
|
||||
|
||||
/// Remove the given machine instruction from the mapping.
|
||||
void removeMachineInstrFromMaps(MachineInstr *mi) {
|
||||
// remove index -> MachineInstr and
|
||||
// MachineInstr -> index mappings
|
||||
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
|
||||
if (mi2iItr != mi2iMap.end()) {
|
||||
IndexListEntry *miEntry(mi2iItr->second.listEntry());
|
||||
assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
|
||||
// FIXME: Eventually we want to actually delete these indexes.
|
||||
miEntry->setInstr(0);
|
||||
mi2iMap.erase(mi2iItr);
|
||||
}
|
||||
}
|
||||
|
||||
/// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
|
||||
/// maps used by register allocator.
|
||||
void replaceMachineInstrInMaps(MachineInstr *mi, MachineInstr *newMI) {
|
||||
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
|
||||
if (mi2iItr == mi2iMap.end())
|
||||
return;
|
||||
SlotIndex replaceBaseIndex = mi2iItr->second;
|
||||
IndexListEntry *miEntry(replaceBaseIndex.listEntry());
|
||||
assert(miEntry->getInstr() == mi &&
|
||||
"Mismatched instruction in index tables.");
|
||||
miEntry->setInstr(newMI);
|
||||
mi2iMap.erase(mi2iItr);
|
||||
mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
|
||||
}
|
||||
|
||||
/// Add the given MachineBasicBlock into the maps.
|
||||
void insertMBBInMaps(MachineBasicBlock *mbb) {
|
||||
MachineFunction::iterator nextMBB =
|
||||
llvm::next(MachineFunction::iterator(mbb));
|
||||
|
||||
IndexListEntry *startEntry = 0;
|
||||
IndexListEntry *endEntry = 0;
|
||||
IndexList::iterator newItr;
|
||||
if (nextMBB == mbb->getParent()->end()) {
|
||||
startEntry = &indexList.back();
|
||||
endEntry = createEntry(0, 0);
|
||||
newItr = indexList.insertAfter(startEntry, endEntry);
|
||||
} else {
|
||||
startEntry = createEntry(0, 0);
|
||||
endEntry = getMBBStartIdx(nextMBB).listEntry();
|
||||
newItr = indexList.insert(endEntry, startEntry);
|
||||
}
|
||||
|
||||
SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
|
||||
SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);
|
||||
|
||||
MachineFunction::iterator prevMBB(mbb);
|
||||
assert(prevMBB != mbb->getParent()->end() &&
|
||||
"Can't insert a new block at the beginning of a function.");
|
||||
--prevMBB;
|
||||
MBBRanges[prevMBB->getNumber()].second = startIdx;
|
||||
|
||||
assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
|
||||
"Blocks must be added in order");
|
||||
MBBRanges.push_back(std::make_pair(startIdx, endIdx));
|
||||
idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
|
||||
|
||||
renumberIndexes(newItr);
|
||||
std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
|
||||
}
|
||||
|
||||
/// \brief Free the resources that were required to maintain a SlotIndex.
|
||||
///
|
||||
/// Once an index is no longer needed (for instance because the instruction
|
||||
/// at that index has been moved), the resources required to maintain the
|
||||
/// index can be relinquished to reduce memory use and improve renumbering
|
||||
/// performance. Any remaining SlotIndex objects that point to the same
|
||||
/// index are left 'dangling' (much the same as a dangling pointer to a
|
||||
/// freed object) and should not be accessed, except to destruct them.
|
||||
///
|
||||
/// Like dangling pointers, access to dangling SlotIndexes can cause
|
||||
/// painful-to-track-down bugs, especially if the memory for the index
|
||||
/// previously pointed to has been re-used. To detect dangling SlotIndex
|
||||
/// bugs, build with EXPENSIVE_CHECKS=1. This will cause "erased" indexes to
|
||||
/// be retained in a graveyard instead of being freed. Operations on indexes
|
||||
/// in the graveyard will trigger an assertion.
|
||||
void eraseIndex(SlotIndex index) {
|
||||
IndexListEntry *entry = index.listEntry();
|
||||
#ifdef EXPENSIVE_CHECKS
|
||||
indexList.remove(entry);
|
||||
graveyardList.push_back(entry);
|
||||
entry->setPoison();
|
||||
#else
|
||||
indexList.erase(entry);
|
||||
#endif
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
// Specialize IntervalMapInfo for half-open slot index intervals.
|
||||
template <>
|
||||
struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> {
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif // LLVM_CODEGEN_SLOTINDEXES_H
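A short, hedged example of how these indexes are typically consumed by a client pass; SI, DefMI and UseMI are assumed context (the SlotIndexes analysis and two mapped instructions), not names defined in this header.

// Compare the positions of two mapped instructions at their register slots.
SlotIndex DefIdx = SI->getInstructionIndex(DefMI).getRegSlot();
SlotIndex UseIdx = SI->getInstructionIndex(UseMI).getRegSlot();
if (SlotIndex::isEarlierInstr(DefIdx, UseIdx)) {
  // DefMI is numbered before UseMI, so a value defined at DefIdx can be
  // live at UseIdx within the same renumbering.
}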
135
thirdparty/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
vendored
Normal file
@@ -0,0 +1,135 @@
//==-- llvm/CodeGen/TargetLoweringObjectFileImpl.h - Object Info -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements classes used to handle lowerings specific to common
// object file formats.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

namespace llvm {
  class MachineModuleInfo;
  class Mangler;
  class MCAsmInfo;
  class MCExpr;
  class MCSection;
  class MCSectionMachO;
  class MCSymbol;
  class MCContext;
  class GlobalValue;
  class TargetMachine;


class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
  bool UseInitArray;

public:
  virtual ~TargetLoweringObjectFileELF() {}

  virtual void emitPersonalityValue(MCStreamer &Streamer,
                                    const TargetMachine &TM,
                                    const MCSymbol *Sym) const;

  /// getSectionForConstant - Given a constant with the SectionKind, return a
  /// section that it should be placed in.
  virtual const MCSection *getSectionForConstant(SectionKind Kind) const;


  virtual const MCSection *
  getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
                           Mangler *Mang, const TargetMachine &TM) const;

  virtual const MCSection *
  SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
                         Mangler *Mang, const TargetMachine &TM) const;

  /// getTTypeGlobalReference - Return an MCExpr to use for a reference to the
  /// specified type info global variable from exception handling information.
  virtual const MCExpr *
  getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
                          MachineModuleInfo *MMI, unsigned Encoding,
                          MCStreamer &Streamer) const;

  // getCFIPersonalitySymbol - The symbol that gets passed to .cfi_personality.
  virtual MCSymbol *
  getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
                          MachineModuleInfo *MMI) const;

  void InitializeELF(bool UseInitArray_);
  virtual const MCSection *
  getStaticCtorSection(unsigned Priority = 65535) const;
  virtual const MCSection *
  getStaticDtorSection(unsigned Priority = 65535) const;
};



class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
public:
  virtual ~TargetLoweringObjectFileMachO() {}

  /// emitModuleFlags - Emit the module flags that specify the garbage
  /// collection information.
  virtual void emitModuleFlags(MCStreamer &Streamer,
                               ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
                               Mangler *Mang, const TargetMachine &TM) const;

  virtual const MCSection *
  SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
                         Mangler *Mang, const TargetMachine &TM) const;

  virtual const MCSection *
  getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
                           Mangler *Mang, const TargetMachine &TM) const;

  virtual const MCSection *getSectionForConstant(SectionKind Kind) const;

  /// shouldEmitUsedDirectiveFor - This hook allows targets to selectively
  /// decide not to emit the UsedDirective for some symbols in llvm.used.
  /// FIXME: REMOVE this (rdar://7071300)
  virtual bool shouldEmitUsedDirectiveFor(const GlobalValue *GV,
                                          Mangler *) const;

  /// getTTypeGlobalReference - The mach-o version of this method
  /// defaults to returning a stub reference.
  virtual const MCExpr *
  getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
                          MachineModuleInfo *MMI, unsigned Encoding,
                          MCStreamer &Streamer) const;

  // getCFIPersonalitySymbol - The symbol that gets passed to .cfi_personality.
  virtual MCSymbol *
  getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
                          MachineModuleInfo *MMI) const;
};




class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
public:
  virtual ~TargetLoweringObjectFileCOFF() {}

  virtual const MCSection *
  getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
                           Mangler *Mang, const TargetMachine &TM) const;

  virtual const MCSection *
  SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
                         Mangler *Mang, const TargetMachine &TM) const;
};

} // end namespace llvm

#endif
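For context, a target usually picks one of these implementations based on the object-file format of its triple. The helper below is a sketch under the assumption that the Triple predicates shown exist in this LLVM revision; the function name createTLOF is hypothetical.

TargetLoweringObjectFile *createTLOF(const Triple &TT) {
  // Choose the lowering that matches the binary format of the target triple.
  if (TT.isOSBinFormatMachO())
    return new TargetLoweringObjectFileMachO();
  if (TT.isOSBinFormatCOFF())
    return new TargetLoweringObjectFileCOFF();
  return new TargetLoweringObjectFileELF();
}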
170
thirdparty/clang/include/llvm/CodeGen/TargetSchedule.h
vendored
Normal file
170
thirdparty/clang/include/llvm/CodeGen/TargetSchedule.h
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
//===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines a wrapper around MCSchedModel that allows the interface to
|
||||
// benefit from information currently only available in TargetInstrInfo.
|
||||
// Ideally, the scheduling interface would be fully defined in the MC layer.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
|
||||
#define LLVM_CODEGEN_TARGETSCHEDULE_H
|
||||
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/MC/MCInstrItineraries.h"
|
||||
#include "llvm/MC/MCSchedule.h"
|
||||
#include "llvm/Target/TargetSubtargetInfo.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class TargetRegisterInfo;
|
||||
class TargetSubtargetInfo;
|
||||
class TargetInstrInfo;
|
||||
class MachineInstr;
|
||||
|
||||
/// Provide an instruction scheduling machine model to CodeGen passes.
|
||||
class TargetSchedModel {
|
||||
// For efficiency, hold a copy of the statically defined MCSchedModel for this
|
||||
// processor.
|
||||
MCSchedModel SchedModel;
|
||||
InstrItineraryData InstrItins;
|
||||
const TargetSubtargetInfo *STI;
|
||||
const TargetInstrInfo *TII;
|
||||
|
||||
SmallVector<unsigned, 16> ResourceFactors;
|
||||
unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
|
||||
unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
|
||||
public:
|
||||
TargetSchedModel(): STI(0), TII(0) {}
|
||||
|
||||
/// \brief Initialize the machine model for instruction scheduling.
|
||||
///
|
||||
/// The machine model API keeps a copy of the top-level MCSchedModel table
|
||||
/// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
|
||||
/// dynamic properties.
|
||||
void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
|
||||
const TargetInstrInfo *tii);
|
||||
|
||||
/// Return the MCSchedClassDesc for this instruction.
|
||||
const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
|
||||
|
||||
/// \brief TargetInstrInfo getter.
|
||||
const TargetInstrInfo *getInstrInfo() const { return TII; }
|
||||
|
||||
/// \brief Return true if this machine model includes an instruction-level
|
||||
/// scheduling model.
|
||||
///
|
||||
/// This is more detailed than the course grain IssueWidth and default
|
||||
/// latency properties, but separate from the per-cycle itinerary data.
|
||||
bool hasInstrSchedModel() const;
|
||||
|
||||
const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
|
||||
|
||||
/// \brief Return true if this machine model includes cycle-to-cycle itinerary
|
||||
/// data.
|
||||
///
|
||||
/// This models scheduling at each stage in the processor pipeline.
|
||||
bool hasInstrItineraries() const;
|
||||
|
||||
const InstrItineraryData *getInstrItineraries() const {
|
||||
if (hasInstrItineraries())
|
||||
return &InstrItins;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// \brief Identify the processor corresponding to the current subtarget.
|
||||
unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
|
||||
|
||||
/// \brief Maximum number of micro-ops that may be scheduled per cycle.
|
||||
unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
|
||||
  /// \brief Number of cycles the OOO processor is expected to hide.
  unsigned getILPWindow() const { return SchedModel.ILPWindow; }

  /// \brief Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = 0) const;

  /// \brief Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// \brief Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

  typedef const MCWriteProcResEntry *ProcResIter;

  // \brief Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }

  /// \brief Multiply the number of units consumed for a resource by this
  /// factor to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// \brief Multiply number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// \brief Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }

  /// \brief Compute operand latency based on the available machine model.
  ///
  /// Computes and returns the latency of the given data dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  ///
  /// FindMin may be set to get the minimum vs. expected latency. Minimum
  /// latency is used for scheduling groups, while expected latency is for
  /// instruction cost and critical path.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx,
                                 bool FindMin) const;

  /// \brief Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent
  /// of a particular use. computeOperandLatency is the preferred API, but this
  /// is occasionally useful to help estimate instruction cost.
  unsigned computeInstrLatency(const MachineInstr *MI) const;

  /// \brief Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *DepMI) const;

private:
  /// getDefLatency is a helper for computeOperandLatency. Return the
  /// instruction's latency if operand lookup is not required.
  /// Otherwise return -1.
  int getDefLatency(const MachineInstr *DefMI, bool FindMin) const;
};

} // namespace llvm

#endif
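The latency and resource queries above are easiest to see side by side. The following sketch is illustrative only: it assumes the enclosing class is LLVM's TargetSchedModel (declared earlier in this header, with an assumed include path of llvm/CodeGen/TargetSchedule.h), that the caller already has the MachineInstr operands and a resolved MCSchedClassDesc, and that MCWriteProcResEntry exposes ProcResourceIdx/Cycles fields as in the matching MCSchedule.h; the helper name reportLatencies is made up for the example.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void reportLatencies(const TargetSchedModel &SchedModel,
                            const MachineInstr *DefMI, unsigned DefOperIdx,
                            const MachineInstr *UseMI, unsigned UseOperIdx,
                            const MCSchedClassDesc *SC) {
  // Expected latency of the def-use edge vs. the minimum latency used for
  // scheduling groups (FindMin=true).
  unsigned Expected = SchedModel.computeOperandLatency(DefMI, DefOperIdx,
                                                       UseMI, UseOperIdx,
                                                       /*FindMin=*/false);
  unsigned Minimum = SchedModel.computeOperandLatency(DefMI, DefOperIdx,
                                                      UseMI, UseOperIdx,
                                                      /*FindMin=*/true);
  errs() << "edge latency: expected=" << Expected << " min=" << Minimum
         << ", instr latency=" << SchedModel.computeInstrLatency(DefMI) << "\n";

  // Resource pressure, normalized so micro-op counts, per-resource unit
  // counts, and cycles (getLatencyFactor) are directly comparable.
  errs() << "uops (normalized): "
         << SchedModel.getNumMicroOps(DefMI, SC) * SchedModel.getMicroOpFactor()
         << "\n";
  for (TargetSchedModel::ProcResIter I = SchedModel.getWriteProcResBegin(SC),
                                     E = SchedModel.getWriteProcResEnd(SC);
       I != E; ++I)
    errs() << "  resource " << I->ProcResourceIdx << ": "
           << I->Cycles * SchedModel.getResourceFactor(I->ProcResourceIdx)
           << " units\n";
}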
873
thirdparty/clang/include/llvm/CodeGen/ValueTypes.h
vendored
Normal file
@@ -0,0 +1,873 @@
//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the set of low-level target independent types which various
// values in the code generator are. This allows the target specific behavior
// of instructions to be described to target independent passes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_VALUETYPES_H
#define LLVM_CODEGEN_VALUETYPES_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <string>

namespace llvm {
class Type;
class LLVMContext;
struct EVT;

/// MVT - Machine Value Type. Every type that is supported natively by some
/// processor targeted by LLVM occurs here. This means that any legal value
/// type can be represented by an MVT.
class MVT {
public:
  enum SimpleValueType {
    // INVALID_SIMPLE_VALUE_TYPE - Simple value types less than zero are
    // considered extended value types.
    INVALID_SIMPLE_VALUE_TYPE = -1,

    // If you change this numbering, you must change the values in
    // ValueTypes.td as well!
    Other = 0,    // This is a non-standard value
    i1 = 1,       // This is a 1 bit integer value
    i8 = 2,       // This is an 8 bit integer value
    i16 = 3,      // This is a 16 bit integer value
    i32 = 4,      // This is a 32 bit integer value
    i64 = 5,      // This is a 64 bit integer value
    i128 = 6,     // This is a 128 bit integer value

    FIRST_INTEGER_VALUETYPE = i1,
    LAST_INTEGER_VALUETYPE = i128,

    f16 = 7,      // This is a 16 bit floating point value
    f32 = 8,      // This is a 32 bit floating point value
    f64 = 9,      // This is a 64 bit floating point value
    f80 = 10,     // This is an 80 bit floating point value
    f128 = 11,    // This is a 128 bit floating point value
    ppcf128 = 12, // This is a PPC 128-bit floating point value

    FIRST_FP_VALUETYPE = f16,
    LAST_FP_VALUETYPE = ppcf128,

    v2i1 = 13,    // 2 x i1
    v4i1 = 14,    // 4 x i1
    v8i1 = 15,    // 8 x i1
    v16i1 = 16,   // 16 x i1
    v32i1 = 17,   // 32 x i1
    v64i1 = 18,   // 64 x i1

    v2i8 = 19,    // 2 x i8
    v4i8 = 20,    // 4 x i8
    v8i8 = 21,    // 8 x i8
    v16i8 = 22,   // 16 x i8
    v32i8 = 23,   // 32 x i8
    v64i8 = 24,   // 64 x i8
    v1i16 = 25,   // 1 x i16
    v2i16 = 26,   // 2 x i16
    v4i16 = 27,   // 4 x i16
    v8i16 = 28,   // 8 x i16
    v16i16 = 29,  // 16 x i16
    v32i16 = 30,  // 32 x i16
    v1i32 = 31,   // 1 x i32
    v2i32 = 32,   // 2 x i32
    v4i32 = 33,   // 4 x i32
    v8i32 = 34,   // 8 x i32
    v16i32 = 35,  // 16 x i32
    v1i64 = 36,   // 1 x i64
    v2i64 = 37,   // 2 x i64
    v4i64 = 38,   // 4 x i64
    v8i64 = 39,   // 8 x i64
    v16i64 = 40,  // 16 x i64

    FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
    LAST_INTEGER_VECTOR_VALUETYPE = v16i64,

    v2f16 = 41,   // 2 x f16
    v2f32 = 42,   // 2 x f32
    v4f32 = 43,   // 4 x f32
    v8f32 = 44,   // 8 x f32
    v16f32 = 45,  // 16 x f32
    v2f64 = 46,   // 2 x f64
    v4f64 = 47,   // 4 x f64
    v8f64 = 48,   // 8 x f64

    FIRST_FP_VECTOR_VALUETYPE = v2f16,
    LAST_FP_VECTOR_VALUETYPE = v8f64,

    FIRST_VECTOR_VALUETYPE = v2i1,
    LAST_VECTOR_VALUETYPE = v8f64,

    x86mmx = 49,  // This is an X86 MMX value

    Glue = 50,    // This glues nodes together during pre-RA sched

    isVoid = 51,  // This has no value

    Untyped = 52, // This value takes a register, but has
                  // unspecified type. The register class
                  // will be determined by the opcode.

    LAST_VALUETYPE = 53, // This always remains at the end of the list.

    // This is the current maximum for LAST_VALUETYPE.
    // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors.
    // This value must be a multiple of 32.
    MAX_ALLOWED_VALUETYPE = 64,

    // Metadata - This is MDNode or MDString.
    Metadata = 250,

    // iPTRAny - An int value the size of the pointer of the current
    // target to any address space. This must only be used internal to
    // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
    iPTRAny = 251,

    // vAny - A vector with any length and element size. This is used
    // for intrinsics that have overloadings based on vector types.
    // This is only for tblgen's consumption!
    vAny = 252,

    // fAny - Any floating-point or vector floating-point value. This is used
    // for intrinsics that have overloadings based on floating-point types.
    // This is only for tblgen's consumption!
    fAny = 253,

    // iAny - An integer or vector integer value of any bit width. This is
    // used for intrinsics that have overloadings based on integer bit widths.
    // This is only for tblgen's consumption!
    iAny = 254,

    // iPTR - An int value the size of the pointer of the current
    // target. This should only be used internal to tblgen!
    iPTR = 255
  };

  SimpleValueType SimpleTy;

  MVT() : SimpleTy((SimpleValueType)(INVALID_SIMPLE_VALUE_TYPE)) {}
  MVT(SimpleValueType SVT) : SimpleTy(SVT) { }

  bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
  bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
  bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
  bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
  bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
  bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }

  /// isFloatingPoint - Return true if this is a FP, or a vector FP type.
  bool isFloatingPoint() const {
    return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
             SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
            (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
             SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
  }

  /// isInteger - Return true if this is an integer, or a vector integer type.
  bool isInteger() const {
    return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
             SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
            (SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
             SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
  }

  /// isVector - Return true if this is a vector value type.
  bool isVector() const {
    return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
            SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
  }

  /// is16BitVector - Return true if this is a 16-bit vector type.
  bool is16BitVector() const {
    return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
            SimpleTy == MVT::v16i1);
  }

  /// is32BitVector - Return true if this is a 32-bit vector type.
  bool is32BitVector() const {
    return (SimpleTy == MVT::v4i8 || SimpleTy == MVT::v2i16 ||
            SimpleTy == MVT::v1i32);
  }

  /// is64BitVector - Return true if this is a 64-bit vector type.
  bool is64BitVector() const {
    return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
            SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 ||
            SimpleTy == MVT::v2f32);
  }

  /// is128BitVector - Return true if this is a 128-bit vector type.
  bool is128BitVector() const {
    return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
            SimpleTy == MVT::v4i32 || SimpleTy == MVT::v2i64 ||
            SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
  }

  /// is256BitVector - Return true if this is a 256-bit vector type.
  bool is256BitVector() const {
    return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
            SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
            SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64);
  }

  /// is512BitVector - Return true if this is a 512-bit vector type.
  bool is512BitVector() const {
    return (SimpleTy == MVT::v8f64 || SimpleTy == MVT::v16f32 ||
            SimpleTy == MVT::v64i8 || SimpleTy == MVT::v32i16 ||
            SimpleTy == MVT::v8i64 || SimpleTy == MVT::v16i32);
  }

  /// is1024BitVector - Return true if this is a 1024-bit vector type.
  bool is1024BitVector() const {
    return (SimpleTy == MVT::v16i64);
  }

  /// isPow2VectorType - Returns true if the given vector is a power of 2.
  bool isPow2VectorType() const {
    unsigned NElts = getVectorNumElements();
    return !(NElts & (NElts - 1));
  }

  /// getPow2VectorType - Widens the length of the given vector MVT up to
  /// the nearest power of 2 and returns that type.
  MVT getPow2VectorType() const {
    if (isPow2VectorType())
      return *this;

    unsigned NElts = getVectorNumElements();
    unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
    return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
  }

  /// getScalarType - If this is a vector type, return the element type,
  /// otherwise return this.
  MVT getScalarType() const {
    return isVector() ? getVectorElementType() : *this;
  }

  MVT getVectorElementType() const {
    switch (SimpleTy) {
    default:
      llvm_unreachable("Not a vector MVT!");
    case v2i1:
    case v4i1:
    case v8i1:
    case v16i1:
    case v32i1:
    case v64i1: return i1;
    case v2i8:
    case v4i8:
    case v8i8:
    case v16i8:
    case v32i8:
    case v64i8: return i8;
    case v1i16:
    case v2i16:
    case v4i16:
    case v8i16:
    case v16i16:
    case v32i16: return i16;
    case v1i32:
    case v2i32:
    case v4i32:
    case v8i32:
    case v16i32: return i32;
    case v1i64:
    case v2i64:
    case v4i64:
    case v8i64:
    case v16i64: return i64;
    case v2f16: return f16;
    case v2f32:
    case v4f32:
    case v8f32:
    case v16f32: return f32;
    case v2f64:
    case v4f64:
    case v8f64: return f64;
    }
  }

  unsigned getVectorNumElements() const {
    switch (SimpleTy) {
    default:
      llvm_unreachable("Not a vector MVT!");
    case v32i1:
    case v32i8:
    case v32i16: return 32;
    case v64i1:
    case v64i8: return 64;
    case v16i1:
    case v16i8:
    case v16i16:
    case v16i32:
    case v16i64:
    case v16f32: return 16;
    case v8i1:
    case v8i8:
    case v8i16:
    case v8i32:
    case v8i64:
    case v8f32:
    case v8f64: return 8;
    case v4i1:
    case v4i8:
    case v4i16:
    case v4i32:
    case v4i64:
    case v4f32:
    case v4f64: return 4;
    case v2i1:
    case v2i8:
    case v2i16:
    case v2i32:
    case v2i64:
    case v2f16:
    case v2f32:
    case v2f64: return 2;
    case v1i16:
    case v1i32:
    case v1i64: return 1;
    }
  }

  unsigned getSizeInBits() const {
    switch (SimpleTy) {
    case iPTR:
      llvm_unreachable("Value type size is target-dependent. Ask TLI.");
    case iPTRAny:
    case iAny:
    case fAny:
    case vAny:
      llvm_unreachable("Value type is overloaded.");
    case Metadata:
      llvm_unreachable("Value type is metadata.");
    default:
      llvm_unreachable("getSizeInBits called on extended MVT.");
    case i1: return 1;
    case v2i1: return 2;
    case v4i1: return 4;
    case i8:
    case v8i1: return 8;
    case i16:
    case f16:
    case v16i1:
    case v2i8:
    case v1i16: return 16;
    case f32:
    case i32:
    case v32i1:
    case v4i8:
    case v2i16:
    case v2f16:
    case v1i32: return 32;
    case x86mmx:
    case f64:
    case i64:
    case v64i1:
    case v8i8:
    case v4i16:
    case v2i32:
    case v1i64:
    case v2f32: return 64;
    case f80: return 80;
    case f128:
    case ppcf128:
    case i128:
    case v16i8:
    case v8i16:
    case v4i32:
    case v2i64:
    case v4f32:
    case v2f64: return 128;
    case v32i8:
    case v16i16:
    case v8i32:
    case v4i64:
    case v8f32:
    case v4f64: return 256;
    case v64i8:
    case v32i16:
    case v16i32:
    case v8i64:
    case v16f32:
    case v8f64: return 512;
    case v16i64: return 1024;
    }
  }

  /// getStoreSize - Return the number of bytes overwritten by a store
  /// of the specified value type.
  unsigned getStoreSize() const {
    return (getSizeInBits() + 7) / 8;
  }

  /// getStoreSizeInBits - Return the number of bits overwritten by a store
  /// of the specified value type.
  unsigned getStoreSizeInBits() const {
    return getStoreSize() * 8;
  }

  /// Return true if this has more bits than VT.
  bool bitsGT(MVT VT) const {
    return getSizeInBits() > VT.getSizeInBits();
  }

  /// Return true if this has no less bits than VT.
  bool bitsGE(MVT VT) const {
    return getSizeInBits() >= VT.getSizeInBits();
  }

  /// Return true if this has less bits than VT.
  bool bitsLT(MVT VT) const {
    return getSizeInBits() < VT.getSizeInBits();
  }

  /// Return true if this has no more bits than VT.
  bool bitsLE(MVT VT) const {
    return getSizeInBits() <= VT.getSizeInBits();
  }

  static MVT getFloatingPointVT(unsigned BitWidth) {
    switch (BitWidth) {
    default:
      llvm_unreachable("Bad bit width!");
    case 16:
      return MVT::f16;
    case 32:
      return MVT::f32;
    case 64:
      return MVT::f64;
    case 80:
      return MVT::f80;
    case 128:
      return MVT::f128;
    }
  }

  static MVT getIntegerVT(unsigned BitWidth) {
    switch (BitWidth) {
    default:
      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
    case 1:
      return MVT::i1;
    case 8:
      return MVT::i8;
    case 16:
      return MVT::i16;
    case 32:
      return MVT::i32;
    case 64:
      return MVT::i64;
    case 128:
      return MVT::i128;
    }
  }

  static MVT getVectorVT(MVT VT, unsigned NumElements) {
    switch (VT.SimpleTy) {
    default:
      break;
    case MVT::i1:
      if (NumElements == 2)  return MVT::v2i1;
      if (NumElements == 4)  return MVT::v4i1;
      if (NumElements == 8)  return MVT::v8i1;
      if (NumElements == 16) return MVT::v16i1;
      if (NumElements == 32) return MVT::v32i1;
      if (NumElements == 64) return MVT::v64i1;
      break;
    case MVT::i8:
      if (NumElements == 2)  return MVT::v2i8;
      if (NumElements == 4)  return MVT::v4i8;
      if (NumElements == 8)  return MVT::v8i8;
      if (NumElements == 16) return MVT::v16i8;
      if (NumElements == 32) return MVT::v32i8;
      if (NumElements == 64) return MVT::v64i8;
      break;
    case MVT::i16:
      if (NumElements == 1)  return MVT::v1i16;
      if (NumElements == 2)  return MVT::v2i16;
      if (NumElements == 4)  return MVT::v4i16;
      if (NumElements == 8)  return MVT::v8i16;
      if (NumElements == 16) return MVT::v16i16;
      if (NumElements == 32) return MVT::v32i16;
      break;
    case MVT::i32:
      if (NumElements == 1)  return MVT::v1i32;
      if (NumElements == 2)  return MVT::v2i32;
      if (NumElements == 4)  return MVT::v4i32;
      if (NumElements == 8)  return MVT::v8i32;
      if (NumElements == 16) return MVT::v16i32;
      break;
    case MVT::i64:
      if (NumElements == 1)  return MVT::v1i64;
      if (NumElements == 2)  return MVT::v2i64;
      if (NumElements == 4)  return MVT::v4i64;
      if (NumElements == 8)  return MVT::v8i64;
      if (NumElements == 16) return MVT::v16i64;
      break;
    case MVT::f16:
      if (NumElements == 2)  return MVT::v2f16;
      break;
    case MVT::f32:
      if (NumElements == 2)  return MVT::v2f32;
      if (NumElements == 4)  return MVT::v4f32;
      if (NumElements == 8)  return MVT::v8f32;
      if (NumElements == 16) return MVT::v16f32;
      break;
    case MVT::f64:
      if (NumElements == 2)  return MVT::v2f64;
      if (NumElements == 4)  return MVT::v4f64;
      if (NumElements == 8)  return MVT::v8f64;
      break;
    }
    return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
  }

  /// Return the value type corresponding to the specified type. This returns
  /// all pointers as iPTR. If HandleUnknown is true, unknown types are
  /// returned as Other, otherwise they are invalid.
  static MVT getVT(Type *Ty, bool HandleUnknown = false);
};
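A small self-contained sanity check (illustrative only, not part of the vendored header) showing how the MVT factories and queries above compose; it uses only members declared in this file plus <cassert>.

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>
using namespace llvm;

int main() {
  MVT V = MVT::getVectorVT(MVT::f32, 4);        // v4f32
  assert(V == MVT::v4f32 && V.isVector() && V.isFloatingPoint());
  assert(V.getVectorElementType() == MVT::f32);
  assert(V.getVectorNumElements() == 4);
  assert(V.getSizeInBits() == 128 && V.getStoreSize() == 16);

  // Integer factory: only the widths enumerated above are simple types.
  assert(MVT::getIntegerVT(64) == MVT::i64);
  assert(MVT::getIntegerVT(17).SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE);

  // v4f32 already has a power-of-two length, so widening is a no-op.
  assert(V.getPow2VectorType() == V);
  return 0;
}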

/// EVT - Extended Value Type. Capable of holding value types which are not
/// native for any processor (such as the i12345 type), as well as the types
/// an MVT can represent.
struct EVT {
private:
  MVT V;
  Type *LLVMTy;

public:
  EVT() : V((MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE)),
          LLVMTy(0) {}
  EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(0) { }
  EVT(MVT S) : V(S), LLVMTy(0) {}

  bool operator==(EVT VT) const {
    return !(*this != VT);
  }
  bool operator!=(EVT VT) const {
    if (V.SimpleTy != VT.V.SimpleTy)
      return true;
    if (V.SimpleTy < 0)
      return LLVMTy != VT.LLVMTy;
    return false;
  }

  /// getFloatingPointVT - Returns the EVT that represents a floating point
  /// type with the given number of bits. There are two floating point types
  /// with 128 bits - this returns f128 rather than ppcf128.
  static EVT getFloatingPointVT(unsigned BitWidth) {
    return MVT::getFloatingPointVT(BitWidth);
  }

  /// getIntegerVT - Returns the EVT that represents an integer with the given
  /// number of bits.
  static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
    MVT M = MVT::getIntegerVT(BitWidth);
    if (M.SimpleTy >= 0)
      return M;
    return getExtendedIntegerVT(Context, BitWidth);
  }

  /// getVectorVT - Returns the EVT that represents a vector NumElements in
  /// length, where each element is of type VT.
  static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements) {
    MVT M = MVT::getVectorVT(VT.V, NumElements);
    if (M.SimpleTy >= 0)
      return M;
    return getExtendedVectorVT(Context, VT, NumElements);
  }

  /// changeVectorElementTypeToInteger - Return a vector with the same number
  /// of elements as this vector, but with the element type converted to an
  /// integer type with the same bitwidth.
  EVT changeVectorElementTypeToInteger() const {
    if (!isSimple())
      return changeExtendedVectorElementTypeToInteger();
    MVT EltTy = getSimpleVT().getVectorElementType();
    unsigned BitWidth = EltTy.getSizeInBits();
    MVT IntTy = MVT::getIntegerVT(BitWidth);
    MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements());
    assert(VecTy.SimpleTy >= 0 &&
           "Simple vector VT not representable by simple integer vector VT!");
    return VecTy;
  }

  /// isSimple - Test if the given EVT is simple (as opposed to being
  /// extended).
  bool isSimple() const {
    return V.SimpleTy >= 0;
  }

  /// isExtended - Test if the given EVT is extended (as opposed to
  /// being simple).
  bool isExtended() const {
    return !isSimple();
  }

  /// isFloatingPoint - Return true if this is a FP, or a vector FP type.
  bool isFloatingPoint() const {
    return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
  }

  /// isInteger - Return true if this is an integer, or a vector integer type.
  bool isInteger() const {
    return isSimple() ? V.isInteger() : isExtendedInteger();
  }

  /// isVector - Return true if this is a vector value type.
  bool isVector() const {
    return isSimple() ? V.isVector() : isExtendedVector();
  }

  /// is16BitVector - Return true if this is a 16-bit vector type.
  bool is16BitVector() const {
    return isSimple() ? V.is16BitVector() : isExtended16BitVector();
  }

  /// is32BitVector - Return true if this is a 32-bit vector type.
  bool is32BitVector() const {
    return isSimple() ? V.is32BitVector() : isExtended32BitVector();
  }

  /// is64BitVector - Return true if this is a 64-bit vector type.
  bool is64BitVector() const {
    return isSimple() ? V.is64BitVector() : isExtended64BitVector();
  }

  /// is128BitVector - Return true if this is a 128-bit vector type.
  bool is128BitVector() const {
    return isSimple() ? V.is128BitVector() : isExtended128BitVector();
  }

  /// is256BitVector - Return true if this is a 256-bit vector type.
  bool is256BitVector() const {
    return isSimple() ? V.is256BitVector() : isExtended256BitVector();
  }

  /// is512BitVector - Return true if this is a 512-bit vector type.
  bool is512BitVector() const {
    return isSimple() ? V.is512BitVector() : isExtended512BitVector();
  }

  /// is1024BitVector - Return true if this is a 1024-bit vector type.
  bool is1024BitVector() const {
    return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
  }

  /// isOverloaded - Return true if this is an overloaded type for TableGen.
  bool isOverloaded() const {
    return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
  }

  /// isByteSized - Return true if the bit size is a multiple of 8.
  bool isByteSized() const {
    return (getSizeInBits() & 7) == 0;
  }

  /// isRound - Return true if the size is a power-of-two number of bytes.
  bool isRound() const {
    unsigned BitSize = getSizeInBits();
    return BitSize >= 8 && !(BitSize & (BitSize - 1));
  }

  /// bitsEq - Return true if this has the same number of bits as VT.
  bool bitsEq(EVT VT) const {
    if (EVT::operator==(VT)) return true;
    return getSizeInBits() == VT.getSizeInBits();
  }

  /// bitsGT - Return true if this has more bits than VT.
  bool bitsGT(EVT VT) const {
    if (EVT::operator==(VT)) return false;
    return getSizeInBits() > VT.getSizeInBits();
  }

  /// bitsGE - Return true if this has no less bits than VT.
  bool bitsGE(EVT VT) const {
    if (EVT::operator==(VT)) return true;
    return getSizeInBits() >= VT.getSizeInBits();
  }

  /// bitsLT - Return true if this has less bits than VT.
  bool bitsLT(EVT VT) const {
    if (EVT::operator==(VT)) return false;
    return getSizeInBits() < VT.getSizeInBits();
  }

  /// bitsLE - Return true if this has no more bits than VT.
  bool bitsLE(EVT VT) const {
    if (EVT::operator==(VT)) return true;
    return getSizeInBits() <= VT.getSizeInBits();
  }

  /// getSimpleVT - Return the SimpleValueType held in the specified
  /// simple EVT.
  MVT getSimpleVT() const {
    assert(isSimple() && "Expected a SimpleValueType!");
    return V;
  }

  /// getScalarType - If this is a vector type, return the element type,
  /// otherwise return this.
  EVT getScalarType() const {
    return isVector() ? getVectorElementType() : *this;
  }

  /// getVectorElementType - Given a vector type, return the type of
  /// each element.
  EVT getVectorElementType() const {
    assert(isVector() && "Invalid vector type!");
    if (isSimple())
      return V.getVectorElementType();
    return getExtendedVectorElementType();
  }

  /// getVectorNumElements - Given a vector type, return the number of
  /// elements it contains.
  unsigned getVectorNumElements() const {
    assert(isVector() && "Invalid vector type!");
    if (isSimple())
      return V.getVectorNumElements();
    return getExtendedVectorNumElements();
  }

  /// getSizeInBits - Return the size of the specified value type in bits.
  unsigned getSizeInBits() const {
    if (isSimple())
      return V.getSizeInBits();
    return getExtendedSizeInBits();
  }

  /// getStoreSize - Return the number of bytes overwritten by a store
  /// of the specified value type.
  unsigned getStoreSize() const {
    return (getSizeInBits() + 7) / 8;
  }

  /// getStoreSizeInBits - Return the number of bits overwritten by a store
  /// of the specified value type.
  unsigned getStoreSizeInBits() const {
    return getStoreSize() * 8;
  }

  /// getRoundIntegerType - Rounds the bit-width of the given integer EVT up
  /// to the nearest power of two (and at least to eight), and returns the
  /// integer EVT with that number of bits.
  EVT getRoundIntegerType(LLVMContext &Context) const {
    assert(isInteger() && !isVector() && "Invalid integer type!");
    unsigned BitWidth = getSizeInBits();
    if (BitWidth <= 8)
      return EVT(MVT::i8);
    return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
  }

  /// getHalfSizedIntegerVT - Finds the smallest simple value type that is
  /// greater than or equal to half the width of this EVT. If no simple
  /// value type can be found, an extended integer value type of half the
  /// size (rounded up) is returned.
  EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
    assert(isInteger() && !isVector() && "Invalid integer type!");
    unsigned EVTSize = getSizeInBits();
    for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
         IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
      EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
      if (HalfVT.getSizeInBits() * 2 >= EVTSize)
        return HalfVT;
    }
    return getIntegerVT(Context, (EVTSize + 1) / 2);
  }

  /// isPow2VectorType - Returns true if the given vector is a power of 2.
  bool isPow2VectorType() const {
    unsigned NElts = getVectorNumElements();
    return !(NElts & (NElts - 1));
  }

  /// getPow2VectorType - Widens the length of the given vector EVT up to
  /// the nearest power of 2 and returns that type.
  EVT getPow2VectorType(LLVMContext &Context) const {
    if (!isPow2VectorType()) {
      unsigned NElts = getVectorNumElements();
      unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
      return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts);
    }
    else {
      return *this;
    }
  }

  /// getEVTString - This function returns value type as a string,
  /// e.g. "i32".
  std::string getEVTString() const;

  /// getTypeForEVT - This method returns an LLVM type corresponding to the
  /// specified EVT. For integer types, this returns an unsigned type. Note
  /// that this will abort for types that cannot be represented.
  Type *getTypeForEVT(LLVMContext &Context) const;

  /// getEVT - Return the value type corresponding to the specified type.
  /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
  /// types are returned as Other, otherwise they are invalid.
  static EVT getEVT(Type *Ty, bool HandleUnknown = false);

  intptr_t getRawBits() const {
    if (isSimple())
      return V.SimpleTy;
    else
      return (intptr_t)(LLVMTy);
  }

  /// compareRawBits - A meaningless but well-behaved order, useful for
  /// constructing containers.
  struct compareRawBits {
    bool operator()(EVT L, EVT R) const {
      if (L.V.SimpleTy == R.V.SimpleTy)
        return L.LLVMTy < R.LLVMTy;
      else
        return L.V.SimpleTy < R.V.SimpleTy;
    }
  };

private:
  // Methods for handling the Extended-type case in functions above.
  // These are all out-of-line to prevent users of this header file
  // from having a dependency on Type.h.
  EVT changeExtendedVectorElementTypeToInteger() const;
  static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
  static EVT getExtendedVectorVT(LLVMContext &C, EVT VT,
                                 unsigned NumElements);
  bool isExtendedFloatingPoint() const;
  bool isExtendedInteger() const;
  bool isExtendedVector() const;
  bool isExtended16BitVector() const;
  bool isExtended32BitVector() const;
  bool isExtended64BitVector() const;
  bool isExtended128BitVector() const;
  bool isExtended256BitVector() const;
  bool isExtended512BitVector() const;
  bool isExtended1024BitVector() const;
  EVT getExtendedVectorElementType() const;
  unsigned getExtendedVectorNumElements() const;
  unsigned getExtendedSizeInBits() const;
};
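An illustrative sketch of the simple/extended split described above: widths that match a SimpleValueType come back as plain MVTs, while anything else is backed by an IR type and reports isExtended(). It assumes this header's include path and LLVM's LLVMContext; the variable names are local to the example.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext Ctx;

  EVT I32 = EVT::getIntegerVT(Ctx, 32);
  assert(I32.isSimple() && I32.getSimpleVT() == MVT::i32);

  EVT I36 = EVT::getIntegerVT(Ctx, 36);        // no simple MVT for i36
  assert(I36.isExtended() && I36.isInteger());
  assert(I36.getRoundIntegerType(Ctx) == EVT::getIntegerVT(Ctx, 64));
  assert(!I36.isByteSized() && I36.getStoreSize() == 5);  // 36 bits -> 5 bytes

  // Vectors follow the same rule: 3 x i32 has no simple encoding.
  EVT V3I32 = EVT::getVectorVT(Ctx, EVT(MVT::i32), 3);
  assert(V3I32.isExtended() && V3I32.getVectorNumElements() == 3);
  assert(V3I32.getPow2VectorType(Ctx) ==
         EVT::getVectorVT(Ctx, EVT(MVT::i32), 4));
  return 0;
}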

} // End llvm namespace

#endif
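The IR round-trip entry points declared above (getEVT/getVT and getTypeForEVT) can be exercised in a few lines. This sketch assumes LLVM's IR Type/DerivedTypes headers; everything else comes from this file.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext Ctx;

  // IR i32 maps onto the simple type MVT::i32 and back again.
  Type *I32Ty = Type::getInt32Ty(Ctx);
  EVT VT = EVT::getEVT(I32Ty);
  assert(VT == MVT::i32);
  assert(VT.getTypeForEVT(Ctx) == I32Ty);

  // A <4 x float> IR vector becomes MVT::v4f32; pointers all map to iPTR.
  Type *V4F32Ty = VectorType::get(Type::getFloatTy(Ctx), 4);
  assert(EVT::getEVT(V4F32Ty) == MVT::v4f32);
  assert(MVT::getVT(Type::getInt8PtrTy(Ctx)) == MVT::iPTR);
  return 0;
}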
190
thirdparty/clang/include/llvm/CodeGen/VirtRegMap.h
vendored
Normal file
@@ -0,0 +1,190 @@
//===-- llvm/CodeGen/VirtRegMap.h - Virtual Register Map -*- C++ -*--------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a virtual register map. This maps virtual registers to
// physical registers and virtual registers to stack slots. It is created and
// updated by a register allocator and then used by a machine code rewriter that
// adds spill code and rewrites virtual into physical register references.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_VIRTREGMAP_H
#define LLVM_CODEGEN_VIRTREGMAP_H

#include "llvm/ADT/IndexedMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetRegisterInfo.h"

namespace llvm {
class MachineInstr;
class MachineFunction;
class MachineRegisterInfo;
class TargetInstrInfo;
class raw_ostream;
class SlotIndexes;

class VirtRegMap : public MachineFunctionPass {
public:
  enum {
    NO_PHYS_REG = 0,
    NO_STACK_SLOT = (1L << 30)-1,
    MAX_STACK_SLOT = (1L << 18)-1
  };

private:
  MachineRegisterInfo *MRI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MachineFunction *MF;

  /// Virt2PhysMap - This is a virtual to physical register
  /// mapping. Each virtual register is required to have an entry in
  /// it; even spilled virtual registers (the register mapped to a
  /// spilled register is the temporary used to load it from the
  /// stack).
  IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;

  /// Virt2StackSlotMap - This is virtual register to stack slot
  /// mapping. Each spilled virtual register has an entry in it
  /// which corresponds to the stack slot this register is spilled
  /// at.
  IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;

  /// Virt2SplitMap - This is virtual register to split virtual register
  /// mapping.
  IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;

  /// createSpillSlot - Allocate a spill slot for RC from MFI.
  unsigned createSpillSlot(const TargetRegisterClass *RC);

  VirtRegMap(const VirtRegMap&) LLVM_DELETED_FUNCTION;
  void operator=(const VirtRegMap&) LLVM_DELETED_FUNCTION;

public:
  static char ID;
  VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
                 Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) { }
  virtual bool runOnMachineFunction(MachineFunction &MF);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunction &getMachineFunction() const {
    assert(MF && "getMachineFunction called before runOnMachineFunction");
    return *MF;
  }

  MachineRegisterInfo &getRegInfo() const { return *MRI; }
  const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }

  void grow();

  /// @brief returns true if the specified virtual register is
  /// mapped to a physical register
  bool hasPhys(unsigned virtReg) const {
    return getPhys(virtReg) != NO_PHYS_REG;
  }

  /// @brief returns the physical register mapped to the specified
  /// virtual register
  unsigned getPhys(unsigned virtReg) const {
    assert(TargetRegisterInfo::isVirtualRegister(virtReg));
    return Virt2PhysMap[virtReg];
  }

  /// @brief creates a mapping for the specified virtual register to
  /// the specified physical register
  void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
    assert(TargetRegisterInfo::isVirtualRegister(virtReg) &&
           TargetRegisterInfo::isPhysicalRegister(physReg));
    assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
           "attempt to assign physical register to already mapped "
           "virtual register");
    Virt2PhysMap[virtReg] = physReg;
  }

  /// @brief clears the specified virtual register's physical
  /// register mapping
  void clearVirt(unsigned virtReg) {
    assert(TargetRegisterInfo::isVirtualRegister(virtReg));
    assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
           "attempt to clear a not assigned virtual register");
    Virt2PhysMap[virtReg] = NO_PHYS_REG;
  }

  /// @brief clears all virtual to physical register mappings
  void clearAllVirt() {
    Virt2PhysMap.clear();
    grow();
  }

  /// @brief returns true if VirtReg is assigned to its preferred physreg.
  bool hasPreferredPhys(unsigned VirtReg);

  /// @brief returns true if VirtReg has a known preferred register.
  /// This returns false if VirtReg has a preference that is a virtual
  /// register that hasn't been assigned yet.
  bool hasKnownPreference(unsigned VirtReg);

  /// @brief records virtReg is a split live interval from SReg.
  void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
    Virt2SplitMap[virtReg] = SReg;
  }

  /// @brief returns the live interval virtReg is split from.
  unsigned getPreSplitReg(unsigned virtReg) const {
    return Virt2SplitMap[virtReg];
  }

  /// getOriginal - Return the original virtual register that VirtReg descends
  /// from through splitting.
  /// A register that was not created by splitting is its own original.
  /// This operation is idempotent.
  unsigned getOriginal(unsigned VirtReg) const {
    unsigned Orig = getPreSplitReg(VirtReg);
    return Orig ? Orig : VirtReg;
  }

  /// @brief returns true if the specified virtual register is not
  /// mapped to a stack slot or rematerialized.
  bool isAssignedReg(unsigned virtReg) const {
    if (getStackSlot(virtReg) == NO_STACK_SLOT)
      return true;
    // Split register can be assigned a physical register as well as a
    // stack slot or remat id.
    return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
  }

  /// @brief returns the stack slot mapped to the specified virtual
  /// register
  int getStackSlot(unsigned virtReg) const {
    assert(TargetRegisterInfo::isVirtualRegister(virtReg));
    return Virt2StackSlotMap[virtReg];
  }

  /// @brief create a mapping for the specified virtual register to
  /// the next available stack slot
  int assignVirt2StackSlot(unsigned virtReg);
  /// @brief create a mapping for the specified virtual register to
  /// the specified stack slot
  void assignVirt2StackSlot(unsigned virtReg, int frameIndex);

  void print(raw_ostream &OS, const Module* M = 0) const;
  void dump() const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
  VRM.print(OS);
  return OS;
}
} // End llvm namespace

#endif
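A minimal sketch of the assignment protocol a register allocator follows against this map. The helper and its arguments are hypothetical, the map is assumed to have been grown for the current function, and VirtReg/PhysReg must be valid for the target; only the VirtRegMap members are taken from this header.

#include "llvm/CodeGen/VirtRegMap.h"
#include <cassert>
using namespace llvm;

// Map VirtReg to PhysReg, or spill it to a stack slot, respecting the
// asserts in the header (a register must be unmapped before remapping).
static void assignOrSpill(VirtRegMap &VRM, unsigned VirtReg, unsigned PhysReg,
                          bool Spill) {
  if (!Spill) {
    VRM.assignVirt2Phys(VirtReg, PhysReg);
    assert(VRM.hasPhys(VirtReg) && VRM.getPhys(VirtReg) == PhysReg);
    return;
  }
  // Spill path: drop any existing physreg mapping, then hand out a slot.
  if (VRM.hasPhys(VirtReg))
    VRM.clearVirt(VirtReg);
  int FI = VRM.assignVirt2StackSlot(VirtReg);
  assert(VRM.getStackSlot(VirtReg) == FI);
  // If VirtReg came from live-range splitting, its original register is
  // still reachable for bookkeeping.
  (void)VRM.getOriginal(VirtReg);
}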