forked from OSchip/llvm-project

commit d7e58640a5
parent 0ee50f6ee4

Update formatting to latest version of clang-format

llvm-svn: 179160
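The change below is purely mechanical: the Polly sources were re-run through a newer clang-format, so only line breaking and whitespace change. As a rough illustration (not part of this commit), a sketch like the following could re-apply the tool over a checkout; the directory path and the explicit LLVM style are assumptions, not taken from the commit.

# Illustrative only: re-running clang-format in place over a source tree.
# Assumes clang-format is on PATH; "polly" is a hypothetical checkout path.
import subprocess
from pathlib import Path

for src in sorted(Path("polly").rglob("*")):
    if src.suffix in {".h", ".cpp", ".c"}:
        # -i rewrites the file in place; -style=LLVM selects the LLVM style.
        subprocess.run(["clang-format", "-i", "-style=LLVM", str(src)], check=True)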
@@ -192,10 +192,10 @@ public:
 /// loop containing the statemenet.
 /// @param P A reference to the pass this function is called from.
 /// The pass is needed to update other analysis.
-static void generate(
-IRBuilder<> &B, ScopStmt &Stmt, VectorValueMapT &GlobalMaps,
-std::vector<LoopToScevMapT> &VLTS, __isl_keep isl_map *Schedule,
-Pass *P) {
+static void
+generate(IRBuilder<> &B, ScopStmt &Stmt, VectorValueMapT &GlobalMaps,
+std::vector<LoopToScevMapT> &VLTS, __isl_keep isl_map *Schedule,
+Pass *P) {
 VectorBlockGenerator Generator(B, GlobalMaps, VLTS, Stmt, Schedule, P);
 Generator.copyBB();
 }
@@ -130,14 +130,14 @@ private:
 Value *Size);
 void createCallSetKernelParameters(Value *Kernel, Value *BlockWidth,
 Value *BlockHeight, Value *DeviceData);
-void createCallLaunchKernel(Value *Kernel, Value *GridWidth,
-Value *GridHeight);
+void
+createCallLaunchKernel(Value *Kernel, Value *GridWidth, Value *GridHeight);
 void createCallStartTimerByCudaEvent(Value *StartEvent, Value *StopEvent);
 void createCallStopTimerByCudaEvent(Value *StartEvent, Value *StopEvent,
 Value *Timer);
-void createCallCleanupGPGPUResources(Value *HostData, Value *DeviceData,
-Value *Module, Value *Context,
-Value *Kernel);
+void
+createCallCleanupGPGPUResources(Value *HostData, Value *DeviceData,
+Value *Module, Value *Context, Value *Kernel);

 /// @brief Create the CUDA subfunction.
 ///
@@ -1,4 +1,4 @@
-//===- polly/LinkAllPasses.h ------------ Reference All Passes ---*- C++ -*-===//
+//===- polly/LinkAllPasses.h ----------- Reference All Passes ---*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -83,6 +83,7 @@ class MayAliasSet {
 MayAliasSet() {}

 friend class MayAliasSetInfo;
+
 public:

 /// @name Must Alias Pointer Iterators

@@ -106,6 +106,7 @@ private:

 /// Updated access relation read from JSCOP file.
 isl_map *newAccessRelation;
+
 public:
 // @brief Create a memory access from an access in LLVM-IR.
 //
@@ -293,6 +294,7 @@ class ScopStmt {
 SmallVectorImpl<unsigned> &Scatter);

 friend class Scop;
+
 public:

 ~ScopStmt();

@@ -469,6 +471,7 @@ class Scop {
 void printStatements(raw_ostream &OS) const;

 friend class ScopInfo;
+
 public:

 ~Scop();
@@ -32,6 +32,7 @@ class Scop;
 /// runOnRegion, subclasses override runOnScop.
 class ScopPass : public RegionPass {
 Scop *S;
+
 protected:
 explicit ScopPass(char &ID) : RegionPass(ID), S(0) {}

@@ -50,6 +51,7 @@ public:
 assert(S && "Not on a Scop!");
 return *S;
 }
+
 private:
 virtual bool runOnRegion(Region *R, RGPassManager &RGM);
 void print(raw_ostream &OS, const Module *) const;
@@ -28,9 +28,9 @@ namespace polly {
 /// @param S The SCEV to analyze.
 /// @param R The region in which we look for dependences.
 bool hasScalarDepsInsideRegion(const llvm::SCEV *S, const llvm::Region *R);
-bool isAffineExpr(const llvm::Region *R, const llvm::SCEV *Expression,
-llvm::ScalarEvolution &SE,
-const llvm::Value *BaseAddress = 0);
+bool
+isAffineExpr(const llvm::Region *R, const llvm::SCEV *Expression,
+llvm::ScalarEvolution &SE, const llvm::Value *BaseAddress = 0);
 std::vector<const llvm::SCEV *> getParamsInAffineExpr(
 const llvm::Region *R, const llvm::SCEV *Expression,
 llvm::ScalarEvolution &SE, const llvm::Value *BaseAddress = 0);
@@ -50,9 +50,9 @@ ValueDependences("polly-value-dependences",
 //===----------------------------------------------------------------------===//
 Dependences::Dependences() : ScopPass(ID) { RAW = WAR = WAW = NULL; }

-void Dependences::collectInfo(Scop &S, isl_union_map **Read,
-isl_union_map **Write, isl_union_map **MayWrite,
-isl_union_map **Schedule) {
+void
+Dependences::collectInfo(Scop &S, isl_union_map **Read, isl_union_map **Write,
+isl_union_map **MayWrite, isl_union_map **Schedule) {
 isl_space *Space = S.getParamSpace();
 *Read = isl_union_map_empty(isl_space_copy(Space));
 *Write = isl_union_map_empty(isl_space_copy(Space));
@@ -150,8 +150,8 @@ std::string ScopDetection::regionIsInvalidBecause(const Region *R) const {
 return InvalidRegions.find(R)->second;
 }

-bool ScopDetection::isValidCFG(BasicBlock &BB,
-DetectionContext &Context) const {
+bool
+ScopDetection::isValidCFG(BasicBlock &BB, DetectionContext &Context) const {
 Region &RefRegion = Context.CurRegion;
 TerminatorInst *TI = BB.getTerminator();

@@ -176,8 +176,7 @@ bool ScopDetection::isValidCFG(BasicBlock &BB,
 // Only Constant and ICmpInst are allowed as condition.
 if (!(isa<Constant>(Condition) || isa<ICmpInst>(Condition)))
 INVALID(AffFunc, "Condition in BB '" + BB.getName() +
-"' neither "
-"constant nor an icmp instruction");
+"' neither constant nor an icmp instruction");

 // Allow perfectly nested conditions.
 assert(Br->getNumSuccessors() == 2 && "Unexpected number of successors");
@@ -316,8 +315,8 @@ bool ScopDetection::isValidMemoryAccess(Instruction &Inst,
 return true;
 }

-bool ScopDetection::hasScalarDependency(Instruction &Inst,
-Region &RefRegion) const {
+bool
+ScopDetection::hasScalarDependency(Instruction &Inst, Region &RefRegion) const {
 for (Instruction::use_iterator UI = Inst.use_begin(), UE = Inst.use_end();
 UI != UE; ++UI)
 if (Instruction *Use = dyn_cast<Instruction>(*UI))

@@ -410,7 +409,7 @@ bool ScopDetection::isValidLoop(Loop *L, DetectionContext &Context) const {
 if (!isAffineExpr(&Context.CurRegion, LoopCount, *SE))
 INVALID(LoopBound,
 "Non affine loop bound '"
-<< *LoopCount << "' in loop: " << L->getHeader()->getName());
+<< *LoopCount << "' in loop: " << L->getHeader()->getName());

 return true;
 }
@@ -438,7 +437,8 @@ Region *ScopDetection::expandRegion(Region &R) {
 if (LastValidRegion)
 delete LastValidRegion;

-// Store this region, because it is the greatest valid (encountered so far)
+// Store this region, because it is the greatest valid (encountered so
+// far).
 LastValidRegion = ExpandedRegion;

 // Create and test the next greater region (if any)

@@ -456,7 +456,7 @@ Region *ScopDetection::expandRegion(Region &R) {
 }

 DEBUG(if (LastValidRegion)
-dbgs() << "\tto " << LastValidRegion->getNameStr() << "\n";
+dbgs() << "\tto " << LastValidRegion->getNameStr() << "\n";
 else dbgs() << "\tExpanding " << R.getNameStr() << " failed\n";);

 return LastValidRegion;
@@ -229,8 +229,8 @@ MemoryAccess::~MemoryAccess() {
 isl_map_free(newAccessRelation);
 }

-static void replace(std::string &str, const std::string &find,
-const std::string &replace) {
+static void
+replace(std::string &str, const std::string &find, const std::string &replace) {
 size_t pos = 0;
 while ((pos = str.find(find, pos)) != std::string::npos) {
 str.replace(pos, find.length(), replace);

@@ -546,7 +546,7 @@ __isl_give isl_set *ScopStmt::addLoopBoundsToDomain(__isl_take isl_set *Domain,
 __isl_give isl_set *ScopStmt::addConditionsToDomain(
 __isl_take isl_set *Domain, TempScop &tempScop, const Region &CurRegion) {
 const Region *TopRegion = tempScop.getMaxRegion().getParent(),
-*CurrentRegion = &CurRegion;
+*CurrentRegion = &CurRegion;
 const BasicBlock *BranchingBB = BB;

 do {
@@ -71,9 +71,9 @@ void TempScop::print(raw_ostream &OS, ScalarEvolution *SE, LoopInfo *LI) const {
 printDetail(OS, SE, LI, &R, 0);
 }

-void TempScop::printDetail(llvm::raw_ostream &OS, ScalarEvolution *SE,
-LoopInfo *LI, const Region *CurR,
-unsigned ind) const {}
+void
+TempScop::printDetail(llvm::raw_ostream &OS, ScalarEvolution *SE, LoopInfo *LI,
+const Region *CurR, unsigned ind) const {}

 void TempScopInfo::buildAccessFunctions(Region &R, BasicBlock &BB) {
 AccFuncSetType Functions;
@@ -179,7 +179,7 @@ Value *BlockGenerator::getNewValue(const Value *Old, ValueMapT &BBMap,
 Value *New = GlobalMap[Old];

 if (Old->getType()->getScalarSizeInBits() <
-New->getType()->getScalarSizeInBits())
+New->getType()->getScalarSizeInBits())
 New = Builder.CreateTruncOrBitCast(New, Old->getType());

 return New;

@@ -338,9 +338,9 @@ BlockGenerator::generateScalarStore(const StoreInst *Store, ValueMapT &BBMap,
 return Builder.CreateStore(ValueOperand, NewPointer);
 }

-void BlockGenerator::copyInstruction(const Instruction *Inst, ValueMapT &BBMap,
-ValueMapT &GlobalMap,
-LoopToScevMapT &LTS) {
+void
+BlockGenerator::copyInstruction(const Instruction *Inst, ValueMapT &BBMap,
+ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
 // Terminator instructions control the control flow. They are explicitly
 // expressed in the clast and do not need to be copied.
 if (Inst->isTerminator())
@@ -88,6 +88,7 @@ class ClastExpCodeGen {
 Value *codegen(const clast_term *e, Type *Ty);
 Value *codegen(const clast_binary *e, Type *Ty);
 Value *codegen(const clast_reduction *r, Type *Ty);
+
 public:

 // A generator for clast expressions.

@@ -261,10 +262,10 @@ private:

 void codegen(const clast_assignment *a);

-void codegen(const clast_assignment *a, ScopStmt *Statement,
-unsigned Dimension, int vectorDim,
-std::vector<ValueMapT> *VectorVMap = 0,
-std::vector<LoopToScevMapT> *VLTS = 0);
+void
+codegen(const clast_assignment *a, ScopStmt *Statement, unsigned Dimension,
+int vectorDim, std::vector<ValueMapT> *VectorVMap = 0,
+std::vector<LoopToScevMapT> *VLTS = 0);

 void codegenSubstitutions(const clast_stmt *Assignment, ScopStmt *Statement,
 int vectorDim = 0,
@@ -421,9 +422,9 @@ extractPartialSchedule(ScopStmt *Statement, isl_set *Domain) {
 UnscheduledDimensions);
 }

-void ClastStmtCodeGen::codegen(const clast_user_stmt *u,
-std::vector<Value *> *IVS, const char *iterator,
-isl_set *Domain) {
+void
+ClastStmtCodeGen::codegen(const clast_user_stmt *u, std::vector<Value *> *IVS,
+const char *iterator, isl_set *Domain) {
 ScopStmt *Statement = (ScopStmt *)u->statement->usr;

 if (u->substitutions)

@@ -488,6 +489,7 @@ void ClastStmtCodeGen::codegenForSequential(const clast_for *f) {
 // clast. Scalar parameters are scalar variables defined outside of the SCoP.
 class ParameterVisitor : public ClastVisitor {
 std::set<Value *> Values;
+
 public:
 ParameterVisitor() : ClastVisitor(), Values() {}

@@ -545,8 +547,8 @@ SetVector<Value *> ClastStmtCodeGen::getOMPValues(const clast_stmt *Body) {
 return Values;
 }

-void ClastStmtCodeGen::updateWithValueMap(
-OMPGenerator::ValueToValueMapTy &VMap) {
+void
+ClastStmtCodeGen::updateWithValueMap(OMPGenerator::ValueToValueMapTy &VMap) {
 std::set<Value *> Inserted;

 for (CharMapT::iterator I = ClastVars.begin(), E = ClastVars.end(); I != E;

@@ -986,8 +988,8 @@ public:

 Region &R = S.getRegion();

-assert (!R.isTopLevelRegion() && "Top level regions are not supported");
-assert (R.getEnteringBlock() && "Only support regions with a single entry");
+assert(!R.isTopLevelRegion() && "Top level regions are not supported");
+assert(R.getEnteringBlock() && "Only support regions with a single entry");

 if (!R.getExitingBlock()) {
 BasicBlock *newExit = createSingleExitEdge(&R, this);
@@ -143,8 +143,8 @@ static void freeIslAstUser(void *Ptr) {
 // dimension, then the loop is parallel. The distance is zero in the current
 // dimension if it is a subset of a map with equal values for the current
 // dimension.
-static bool astScheduleDimIsParallel(__isl_keep isl_ast_build *Build,
-Dependences *D) {
+static bool
+astScheduleDimIsParallel(__isl_keep isl_ast_build *Build, Dependences *D) {
 isl_union_map *Schedule, *Deps;
 isl_map *ScheduleDeps, *Test;
 isl_space *ScheduleSpace;

@@ -698,8 +698,8 @@ void IslNodeBuilder::createUserVector(
 isl_ast_node_free(User);
 }

-void IslNodeBuilder::createForVector(__isl_take isl_ast_node *For,
-int VectorWidth) {
+void
+IslNodeBuilder::createForVector(__isl_take isl_ast_node *For, int VectorWidth) {
 isl_ast_node *Body = isl_ast_node_for_get_body(For);
 isl_ast_expr *Init = isl_ast_node_for_get_init(For);
 isl_ast_expr *Inc = isl_ast_node_for_get_inc(For);
@@ -1028,8 +1028,8 @@ public:

 Region &R = S.getRegion();

-assert (!R.isTopLevelRegion() && "Top level regions are not supported");
-assert (R.getEnteringBlock() && "Only support regions with a single entry");
+assert(!R.isTopLevelRegion() && "Top level regions are not supported");
+assert(R.getEnteringBlock() && "Only support regions with a single entry");

 if (!R.getExitingBlock()) {
 BasicBlock *newExit = createSingleExitEdge(&R, this);
@@ -179,7 +179,7 @@ char &polly::CodePreparationID = CodePreparation::ID;
 Pass *polly::createCodePreparationPass() { return new CodePreparation(); }

 INITIALIZE_PASS_BEGIN(CodePreparation, "polly-prepare",
-"Polly - Prepare code for polly", false, false);
-INITIALIZE_PASS_DEPENDENCY(LoopInfo);
+"Polly - Prepare code for polly", false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
 INITIALIZE_PASS_END(CodePreparation, "polly-prepare",
 "Polly - Prepare code for polly", false, false)

@@ -66,8 +66,8 @@ void DeadCodeElim::getAnalysisUsage(AnalysisUsage &AU) const {
 Pass *polly::createDeadCodeElimPass() { return new DeadCodeElim(); }

 INITIALIZE_PASS_BEGIN(DeadCodeElim, "polly-dce",
-"Polly - Remove dead iterations", false, false);
-INITIALIZE_PASS_DEPENDENCY(Dependences);
-INITIALIZE_PASS_DEPENDENCY(ScopInfo);
+"Polly - Remove dead iterations", false, false)
+INITIALIZE_PASS_DEPENDENCY(Dependences)
+INITIALIZE_PASS_DEPENDENCY(ScopInfo)
 INITIALIZE_PASS_END(DeadCodeElim, "polly-dce", "Polly - Remove dead iterations",
 false, false)
@@ -270,7 +270,7 @@ bool JSONImporter::runOnScop(Scop &scop) {
 isl_map *currentAccessMap = (*MI)->getAccessRelation();

 if (isl_map_dim(newAccessMap, isl_dim_param) !=
-isl_map_dim(currentAccessMap, isl_dim_param)) {
+isl_map_dim(currentAccessMap, isl_dim_param)) {
 errs() << "JScop file changes the number of parameter dimensions\n";
 isl_map_free(currentAccessMap);
 isl_map_free(newAccessMap);
@@ -332,20 +332,17 @@ INITIALIZE_PASS_BEGIN(JSONExporter, "polly-export-jscop",
 "Polly - Export Scops as JSON"
 " (Writes a .jscop file for each Scop)",
 false, false);
-INITIALIZE_PASS_DEPENDENCY(Dependences);
+INITIALIZE_PASS_DEPENDENCY(Dependences)
 INITIALIZE_PASS_END(JSONExporter, "polly-export-jscop",
 "Polly - Export Scops as JSON"
 " (Writes a .jscop file for each Scop)",
 false, false)

-// typedef to make clang-format introduce a linebreak
-typedef int clangformatdef;
-
 INITIALIZE_PASS_BEGIN(JSONImporter, "polly-import-jscop",
 "Polly - Import Scops from JSON"
 " (Reads a .jscop file for each Scop)",
 false, false);
-INITIALIZE_PASS_DEPENDENCY(Dependences);
+INITIALIZE_PASS_DEPENDENCY(Dependences)
 INITIALIZE_PASS_END(JSONImporter, "polly-import-jscop",
 "Polly - Import Scops from JSON"
 " (Reads a .jscop file for each Scop)",
@@ -694,7 +694,7 @@ StatementToIslMapTy *readScattering(Scop *S, scoplib_scop_p OScop) {
 ScopStmt *pollyStmt = *S->begin();

 if (stmt->schedule->NbColumns ==
-2 + pollyStmt->getNumParams() + pollyStmt->getNumIterators()) {
+2 + pollyStmt->getNumParams() + pollyStmt->getNumIterators()) {
 numScatteringDims = maxScattering(stmt);
 }

@@ -39,6 +39,7 @@ class ScopLibExporter : public ScopPass {
 Scop *S;

 std::string getFileName(Scop *S) const;
+
 public:
 static char ID;
 explicit ScopLibExporter() : ScopPass(ID) {}
@@ -74,6 +74,7 @@ class PollyIndVarSimplify : public LoopPass {

 SmallVector<WeakVH, 16> DeadInsts;
 bool Changed;
+
 public:

 static char ID; // Pass identification, replacement for typeid

@@ -491,8 +492,8 @@ void PollyIndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
 /// happen later, except that it's more powerful in some cases, because it's
 /// able to brute-force evaluate arbitrary instructions as long as they have
 /// constant operands at the beginning of the loop.
-void PollyIndVarSimplify::RewriteLoopExitValues(Loop *L,
-SCEVExpander &Rewriter) {
+void
+PollyIndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
 // Verify the input to the pass in already in LCSSA form.
 assert(L->isLCSSAForm(*DT));

@@ -644,8 +645,8 @@ static bool isSafe(const SCEV *S, const Loop *L, ScalarEvolution *SE) {
 return false;
 }

-void PollyIndVarSimplify::RewriteIVExpressions(Loop *L,
-SCEVExpander &Rewriter) {
+void
+PollyIndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
 // Rewrite all induction variable expressions in terms of the canonical
 // induction variable.
 //
@@ -899,13 +900,13 @@ Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
 // comes from a widened IV, it should be removed during a future call to
 // WidenIVUse.
 Value *LHS = (DU.NarrowUse->getOperand(0) == DU.NarrowDef)
-? DU.WideDef
-: getExtend(DU.NarrowUse->getOperand(0), WideType, IsSigned,
-DU.NarrowUse);
+? DU.WideDef
+: getExtend(DU.NarrowUse->getOperand(0), WideType,
+IsSigned, DU.NarrowUse);
 Value *RHS = (DU.NarrowUse->getOperand(1) == DU.NarrowDef)
-? DU.WideDef
-: getExtend(DU.NarrowUse->getOperand(1), WideType, IsSigned,
-DU.NarrowUse);
+? DU.WideDef
+: getExtend(DU.NarrowUse->getOperand(1), WideType,
+IsSigned, DU.NarrowUse);

 BinaryOperator *NarrowBO = cast<BinaryOperator>(DU.NarrowUse);
 BinaryOperator *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS,
@@ -975,7 +976,7 @@ const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {

 const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
 if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
-SE->getTypeSizeInBits(WideType)) {
+SE->getTypeSizeInBits(WideType)) {
 // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
 // index. So don't follow this use.
 return 0;

@@ -1584,7 +1585,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,

 // For integer IVs, truncate the IV before computing IVInit + BECount.
 if (SE->getTypeSizeInBits(IVInit->getType()) >
-SE->getTypeSizeInBits(IVCount->getType()))
+SE->getTypeSizeInBits(IVCount->getType()))
 IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());

 IVLimit = SE->getAddExpr(IVInit, IVCount);

@@ -1674,7 +1675,7 @@ Value *PollyIndVarSimplify::LinearFunctionTestReplace(

 IRBuilder<> Builder(BI);
 if (SE->getTypeSizeInBits(CmpIndVar->getType()) >
-SE->getTypeSizeInBits(ExitCnt->getType())) {
+SE->getTypeSizeInBits(ExitCnt->getType())) {
 CmpIndVar =
 Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(), "lftr.wideiv");
 }
@@ -1901,7 +1902,7 @@ bool PollyIndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
 SmallVector<PHINode *, 2> OldCannIVs;
 while (PHINode *OldCannIV = L->getCanonicalInductionVariable()) {
 if (SE->getTypeSizeInBits(OldCannIV->getType()) >
-SE->getTypeSizeInBits(LargestType))
+SE->getTypeSizeInBits(LargestType))
 OldCannIV->removeFromParent();
 else
 break;

@@ -1985,7 +1986,7 @@ bool PollyIndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
 SE->forgetLoop(L);
 const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
-SE->getTypeSizeInBits(NewBECount->getType()))
+SE->getTypeSizeInBits(NewBECount->getType()))
 NewBECount =
 SE->getTruncateOrNoop(NewBECount, BackedgeTakenCount->getType());
 else
@@ -178,9 +178,9 @@ void IndependentBlocks::moveOperandTree(Instruction *Inst, const Region *R,

 // If the Scop Region does not contain N, skip it and all its operands and
 // continue: because we reach a "parameter".
-// FIXME: we must keep the predicate instruction inside the Scop, otherwise
-// it will be translated to a load instruction, and we can not handle load
-// as affine predicate at this moment.
+// FIXME: we must keep the predicate instruction inside the Scop,
+// otherwise it will be translated to a load instruction, and we can not
+// handle load as affine predicate at this moment.
 if (!R->contains(Operand) && !isa<TerminatorInst>(CurInst)) {
 DEBUG(dbgs() << "Out of region.\n");
 continue;

@@ -235,8 +235,8 @@ void IndependentBlocks::moveOperandTree(Instruction *Inst, const Region *R,
 SE->forgetValue(Inst);
 }

-bool IndependentBlocks::createIndependentBlocks(BasicBlock *BB,
-const Region *R) {
+bool
+IndependentBlocks::createIndependentBlocks(BasicBlock *BB, const Region *R) {
 std::vector<Instruction *> WorkList;
 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE; ++II)
 if (!isSafeToMove(II) && !canSynthesize(II, LI, SE, R))
@@ -363,8 +363,8 @@ bool IndependentBlocks::onlyUsedInRegion(Instruction *Inst, const Region *R) {
 return true;
 }

-bool IndependentBlocks::translateScalarToArray(Instruction *Inst,
-const Region *R) {
+bool
+IndependentBlocks::translateScalarToArray(Instruction *Inst, const Region *R) {
 if (canSynthesize(Inst, LI, SE, R) && onlyUsedInRegion(Inst, R))
 return false;

@@ -418,8 +418,8 @@ bool IndependentBlocks::translateScalarToArray(Instruction *Inst,
 return true;
 }

-bool IndependentBlocks::translateScalarToArray(BasicBlock *BB,
-const Region *R) {
+bool
+IndependentBlocks::translateScalarToArray(BasicBlock *BB, const Region *R) {
 bool changed = false;

 SmallVector<Instruction *, 32> Insts;
@@ -434,8 +434,8 @@ bool IndependentBlocks::translateScalarToArray(BasicBlock *BB,
 return changed;
 }

-bool IndependentBlocks::isIndependentBlock(const Region *R,
-BasicBlock *BB) const {
+bool
+IndependentBlocks::isIndependentBlock(const Region *R, BasicBlock *BB) const {
 for (BasicBlock::iterator II = BB->begin(), IE = --BB->end(); II != IE;
 ++II) {
 Instruction *Inst = &*II;

@@ -38,6 +38,7 @@ class RegionSimplify : public RegionPass {
 Region *r;
 void createSingleEntryEdge(Region *R);
 void createSingleExitEdge(Region *R);
+
 public:
 static char ID;
 explicit RegionSimplify() : RegionPass(ID), r(0) {}
@@ -96,8 +96,8 @@ void polly_allocateMemoryForHostAndDevice(
 void **HostData, PollyGPUDevicePtr **DevData, int MemSize);
 void polly_setKernelParameters(PollyGPUFunction *Kernel, int BlockWidth,
 int BlockHeight, PollyGPUDevicePtr *DevData);
-void polly_launchKernel(PollyGPUFunction *Kernel, int GridWidth,
-int GridHeight);
+void
+polly_launchKernel(PollyGPUFunction *Kernel, int GridWidth, int GridHeight);
 void polly_cleanupGPGPUResources(
 void *HostData, PollyGPUDevicePtr *DevData, PollyGPUModule *Module,
 PollyGPUContext *Context, PollyGPUFunction *Kernel);