// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _
|
|
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
|
|
|
|
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
|
|
// expected-no-diagnostics
|
|
|
|
// Simple aggregate with a pointer member; used below (SA::g / SA::rg) to
// exercise has_device_addr on arrays of structs.
struct ST {
  int *a;
};

// Array typedefs so the clause can be tested on references-to-array members.
typedef int arr[10];

typedef ST STarr[10];
|
|
// Exercises the OpenMP 5.1 'has_device_addr' clause on non-static data
// members of every interesting shape: const array, array of structs,
// reference-to-array, scalar, reference, pointer, and reference-to-pointer.
struct SA {
  const int da[5] = { 0 };
  ST g[10];
  STarr &rg = g;     // reference to the member array of structs above
  int i;
  int &j = i;        // reference member bound to 'i'
  int *k = &j;       // pointer member
  int *&z = k;       // reference to the pointer member above
  int aa[10];
  arr &raa = aa;     // reference to the member array above
  void func(int arg) {
    // Pointer member: the clause asserts 'k' already holds a device address.
#pragma omp target has_device_addr(k)
  {k++;}
    // Reference-to-pointer member.
#pragma omp target has_device_addr(z)
  {z++;}
    // Member array.
#pragma omp target has_device_addr(aa)
  {aa[0]=1;}
    // Reference to a member array.
#pragma omp target has_device_addr(raa)
  {raa[0] = 10;}
    // Array of structs; the region also takes the address of member 'i'.
#pragma omp target has_device_addr(g)
  {g[0].a= &i;}
    // Const member array, read-only inside the region.
#pragma omp target has_device_addr(da)
  {int a = da[1];}
  return;
  }
};
|
|
|
|
// Plain aggregate with scalar, array, and pointer members; referenced by SC
// below. 'foo' only returns the address of the first array element.
struct SB {
  unsigned A;
  unsigned B;
  float Arr[100];
  float *Ptr;
  float *foo() {
    return &Arr[0];
  }
};
|
|
|
|
// Aggregate mixing bit-fields, nested structs, arrays of structs, pointers,
// and a reference-to-pointer member (which forces the user-provided ctor).
struct SC {
  unsigned A : 2;    // bit-field members
  unsigned B : 3;
  unsigned C;
  unsigned D;
  float Arr[100];
  SB S;              // nested struct
  SB ArrS[100];      // array of structs
  SB *PtrS;
  SB *&RPtrS;        // reference member: must be bound in the constructor
  float *Ptr;

  SC(SB *&_RPtrS) : RPtrS(_RPtrS) {}
};
|
|
|
|
// Union type, present to cover union declarations in this test's sema/codegen.
union SD {
  unsigned A;
  float B;
};
|
|
|
|
// Incomplete type and an extern object of it.
struct S1;

extern S1 a;

// Class with a mutable member and static data members; 'b' and 'ba' below are
// const objects of it (their dynamic initialization appears in the CHECK
// lines for @__cxx_global_var_init).
class S2 {
  mutable int a;     // mutable so const objects can still be copied from

public:
  S2():a(0) { }
  S2(S2 &s2):a(s2.a) { }
  static float S2s;
  static const float S2sc;
};

const float S2::S2sc = 0;

const S2 b;

const S2 ba[5];
|
|
// Copyable class with user-provided default and copy constructors; 'c' and
// 'ca' are const globals whose array-ctor loops appear in the CHECK lines.
class S3 {
  int a;

public:
  S3():a(0) { }
  S3(S3 &s3):a(s3.a) { }
};

const S3 c;

const S3 ca[5];

extern const int f;
|
|
// Class whose default and copy constructors are private: only the int
// constructor is usable from outside.
class S4 {
  int a;
  S4();              // private: not default-constructible externally
  S4(const S4 &s4);  // private: not copyable externally

public:
  S4(int v):a(v) { }
};
|
|
// Same pattern as S4 but with defined (still private) default and copy
// constructors.
class S5 {
  int a;
  S5():a(0) {}
  S5(const S5 &s5):a(s5.a) { }

public:
  S5(int v):a(v) { }
};
|
|
|
|
// Global S3 object made threadprivate (its init is in the CHECK lines).
S3 h;

#pragma omp threadprivate(h)

// Unnamed-struct typedef used as the element type in tmain()/main() below.
typedef struct {
  int a;
} S6;
|
|
|
|
// Template version of main()'s test pattern; instantiated with T = int and
// T = int* from main(), covering has_device_addr on dependent types.
template <typename T>
T tmain(T argc) {
  const T da[5] = { 0 };
  S6 h[10];
  auto &rh = h;        // reference to array of structs
  T i;
  T &j = i;
  T *k = &j;           // pointer to dependent type
  T *&z = k;           // reference to that pointer
  T aa[10];
  // Pointer variable.
#pragma omp target has_device_addr(k)
  {k++;}
  // Reference-to-pointer variable.
#pragma omp target has_device_addr(z)
  {z++;}
  // Array of dependent type.
#pragma omp target has_device_addr(aa)
  {T a = aa[0];}
  // Array of structs.
#pragma omp target has_device_addr(h)
  {int a = h[0].a;}
  return 0;
}
|
|
|
|
|
|
// Non-template counterpart of tmain(): has_device_addr on local pointers,
// references, arrays, references-to-array, and an array section. The offload
// regions here are what the autogenerated @main CHECK lines verify.
int main(int argc, char **argv) {
  const int da[5] = { 0 };
  S6 h[10];
  auto &rh = h;        // reference to array of structs
  int i;
  int &j = i;
  int *k = &j;
  int *&z = k;
  int aa[10];
  auto &raa = aa;      // reference to array
  // Pointer variable.
#pragma omp target has_device_addr(k)
  {k++;}
  // Reference-to-pointer variable.
#pragma omp target has_device_addr(z)
  {z++;}
  // Array variable.
#pragma omp target has_device_addr(aa)
  {aa[0]=1;}
  // Reference to an array.
#pragma omp target has_device_addr(raa)
  {int a = raa[0];}
  // Array of structs.
#pragma omp target has_device_addr(h)
  {int a = h[1].a;}
  // Array section [1:3] of a const array.
#pragma omp target has_device_addr(da[1:3])
  {int a = da[1];}
  // Instantiate the template test for both int and int*.
  return tmain<int>(argc) + *tmain<int *>(&argc);
}
|
|
|
|
// has_device_addr on data members referenced inside a member function
// template, combined with a device() clause reading another member.
struct SomeKernel {
  int targetDev;
  float devPtr;
  SomeKernel();
  ~SomeKernel();

  template<unsigned int nRHS>
  void apply() {
    // 'devPtr' is claimed to be a device address; 'targetDev' selects the
    // device and is also modified inside the region.
#pragma omp target has_device_addr(devPtr) device(targetDev)
    {
      devPtr++;
      targetDev++;
    }
  }
};
|
|
|
|
// Forces instantiation of SomeKernel::apply<32>() so its codegen is emitted.
void use_template() {
  SomeKernel aKern;
  aKern.apply<32>();
}
|
|
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init
|
|
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) @_ZL1b)
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN2S2C1Ev
|
|
// CHECK-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
|
|
// CHECK-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN2S2C2Ev
|
|
// CHECK-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
|
|
// CHECK-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 0, i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
|
|
// CHECK: arrayctor.loop:
|
|
// CHECK-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
|
|
// CHECK-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
|
|
// CHECK-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAYCTOR_CUR]], i64 1
|
|
// CHECK-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i64 1, i64 0)
|
|
// CHECK-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
|
|
// CHECK: arrayctor.cont:
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @_ZL1c)
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN2S3C1Ev
|
|
// CHECK-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
|
|
// CHECK-NEXT: store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: call void @_ZN2S3C2Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN2S3C2Ev
|
|
// CHECK-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
|
|
// CHECK-NEXT: store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 0, i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
|
|
// CHECK: arrayctor.loop:
|
|
// CHECK-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S3* [ getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
|
|
// CHECK-NEXT: call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
|
|
// CHECK-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[ARRAYCTOR_CUR]], i64 1
|
|
// CHECK-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S3* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i64 1, i64 0)
|
|
// CHECK-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
|
|
// CHECK: arrayctor.cont:
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @h)
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@main
|
|
// CHECK-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR2:[0-9]+]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
|
|
// CHECK-NEXT: [[DA:%.*]] = alloca [5 x i32], align 4
|
|
// CHECK-NEXT: [[H:%.*]] = alloca [10 x %struct.S6], align 4
|
|
// CHECK-NEXT: [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: [[K:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: [[Z:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[AA:%.*]] = alloca [10 x i32], align 4
|
|
// CHECK-NEXT: [[RAA:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[_TMP13:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS27:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
// CHECK-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
|
|
// CHECK-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
|
|
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
|
|
// CHECK-NEXT: store i32* [[I]], i32** [[J]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
|
|
// CHECK-NEXT: store i32* [[TMP1]], i32** [[K]], align 8
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[Z]], align 8
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[RAA]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32***
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[TMP3]], align 8
|
|
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32***
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[TMP5]], align 8
|
|
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP6]], align 8
|
|
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP9]], align 4
|
|
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP10]], align 4
|
|
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP7]], i8*** [[TMP11]], align 8
|
|
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP8]], i8*** [[TMP12]], align 8
|
|
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP14]], align 8
|
|
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP15]], align 8
|
|
// CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP16]], align 8
|
|
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP17]], align 8
|
|
// CHECK-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l145.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
|
// CHECK-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK: omp_offload.failed:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l145(i32** [[K]]) #[[ATTR5:[0-9]+]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK: omp_offload.cont:
|
|
// CHECK-NEXT: [[TMP20:%.*]] = load i32**, i32*** [[Z]], align 8
|
|
// CHECK-NEXT: store i32** [[TMP20]], i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP21:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP22:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32***
|
|
// CHECK-NEXT: store i32** [[TMP22]], i32*** [[TMP24]], align 8
|
|
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32***
|
|
// CHECK-NEXT: store i32** [[TMP22]], i32*** [[TMP26]], align 8
|
|
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP27]], align 8
|
|
// CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP30]], align 4
|
|
// CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP31]], align 4
|
|
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 8
|
|
// CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP29]], i8*** [[TMP33]], align 8
|
|
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP34]], align 8
|
|
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP35]], align 8
|
|
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP36]], align 8
|
|
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP37]], align 8
|
|
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP38]], align 8
|
|
// CHECK-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
|
|
// CHECK-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
|
|
// CHECK-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
|
|
// CHECK: omp_offload.failed5:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32** [[TMP21]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT6]]
|
|
// CHECK: omp_offload.cont6:
|
|
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to [10 x i32]**
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[TMP42]], align 8
|
|
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to [10 x i32]**
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[TMP44]], align 8
|
|
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP45]], align 8
|
|
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP48]], align 4
|
|
// CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP49]], align 4
|
|
// CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP46]], i8*** [[TMP50]], align 8
|
|
// CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP47]], i8*** [[TMP51]], align 8
|
|
// CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.7, i32 0, i32 0), i64** [[TMP52]], align 8
|
|
// CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.8, i32 0, i32 0), i64** [[TMP53]], align 8
|
|
// CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP54]], align 8
|
|
// CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP55]], align 8
|
|
// CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP56]], align 8
|
|
// CHECK-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l149.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
|
|
// CHECK-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
|
|
// CHECK-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
|
|
// CHECK: omp_offload.failed11:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l149([10 x i32]* [[AA]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT12]]
|
|
// CHECK: omp_offload.cont12:
|
|
// CHECK-NEXT: [[TMP59:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA]], align 8
|
|
// CHECK-NEXT: store [10 x i32]* [[TMP59]], [10 x i32]** [[_TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP60:%.*]] = load [10 x i32]*, [10 x i32]** [[_TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP61:%.*]] = load [10 x i32]*, [10 x i32]** [[_TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to [10 x i32]**
|
|
// CHECK-NEXT: store [10 x i32]* [[TMP61]], [10 x i32]** [[TMP63]], align 8
|
|
// CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to [10 x i32]**
|
|
// CHECK-NEXT: store [10 x i32]* [[TMP61]], [10 x i32]** [[TMP65]], align 8
|
|
// CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP66]], align 8
|
|
// CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS17:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP69]], align 4
|
|
// CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP70]], align 4
|
|
// CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP67]], i8*** [[TMP71]], align 8
|
|
// CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP68]], i8*** [[TMP72]], align 8
|
|
// CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.9, i32 0, i32 0), i64** [[TMP73]], align 8
|
|
// CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i64** [[TMP74]], align 8
|
|
// CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP75]], align 8
|
|
// CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP76]], align 8
|
|
// CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP77]], align 8
|
|
// CHECK-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]])
|
|
// CHECK-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
|
|
// CHECK-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED18:%.*]], label [[OMP_OFFLOAD_CONT19:%.*]]
|
|
// CHECK: omp_offload.failed18:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151([10 x i32]* [[TMP60]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT19]]
|
|
// CHECK: omp_offload.cont19:
|
|
// CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to [10 x %struct.S6]**
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP81]], align 8
|
|
// CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to [10 x %struct.S6]**
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP83]], align 8
|
|
// CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP84]], align 8
|
|
// CHECK-NEXT: [[TMP85:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP86:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP87]], align 4
|
|
// CHECK-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP88]], align 4
|
|
// CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP85]], i8*** [[TMP89]], align 8
|
|
// CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP86]], i8*** [[TMP90]], align 8
|
|
// CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP91]], align 8
|
|
// CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP92]], align 8
|
|
// CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP93]], align 8
|
|
// CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP94]], align 8
|
|
// CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP95]], align 8
|
|
// CHECK-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]])
|
|
// CHECK-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0
|
|
// CHECK-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
|
|
// CHECK: omp_offload.failed24:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153([10 x %struct.S6]* [[H]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT25]]
|
|
// CHECK: omp_offload.cont25:
|
|
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[DA]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP98:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32**
|
|
// CHECK-NEXT: store i32* [[ARRAYDECAY]], i32** [[TMP99]], align 8
|
|
// CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32**
|
|
// CHECK-NEXT: store i32* [[ARRAYDECAY]], i32** [[TMP101]], align 8
|
|
// CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP102]], align 8
|
|
// CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS29:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP105]], align 4
|
|
// CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP106]], align 4
|
|
// CHECK-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP103]], i8*** [[TMP107]], align 8
|
|
// CHECK-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP104]], i8*** [[TMP108]], align 8
|
|
// CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64** [[TMP109]], align 8
|
|
// CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i64** [[TMP110]], align 8
|
|
// CHECK-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP111]], align 8
|
|
// CHECK-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP112]], align 8
|
|
// CHECK-NEXT: [[TMP113:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP113]], align 8
|
|
// CHECK-NEXT: [[TMP114:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]])
|
|
// CHECK-NEXT: [[TMP115:%.*]] = icmp ne i32 [[TMP114]], 0
|
|
// CHECK-NEXT: br i1 [[TMP115]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
|
|
// CHECK: omp_offload.failed30:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155([5 x i32]* [[DA]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT31]]
|
|
// CHECK: omp_offload.cont31:
|
|
// CHECK-NEXT: [[TMP116:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
|
|
// CHECK-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_S0_(i32 noundef signext [[TMP116]])
|
|
// CHECK-NEXT: [[CALL32:%.*]] = call noundef i32* @_Z5tmainIPiET_S1_(i32* noundef [[ARGC_ADDR]])
|
|
// CHECK-NEXT: [[TMP117:%.*]] = load i32, i32* [[CALL32]], align 4
|
|
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[TMP117]]
|
|
// CHECK-NEXT: ret i32 [[ADD]]
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l145
|
|
// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[K:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[K_ADDR:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[K_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[K_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 8
|
|
// CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 1
|
|
// CHECK-NEXT: store i32* [[INCDEC_PTR]], i32** [[TMP0]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
|
|
// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[Z:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: store i32** [[Z]], i32*** [[Z_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[Z_ADDR]], align 8
|
|
// CHECK-NEXT: store i32** [[TMP0]], i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
|
|
// CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
|
|
// CHECK-NEXT: store i32* [[INCDEC_PTR]], i32** [[TMP1]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l149
|
|
// CHECK-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[AA:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[AA_ADDR:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[AA_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[AA_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 0
|
|
// CHECK-NEXT: store i32 1, i32* [[ARRAYIDX]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
|
|
// CHECK-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[RAA:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[RAA_ADDR:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: store [10 x i32]* [[RAA]], [10 x i32]** [[RAA_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA_ADDR]], align 8
|
|
// CHECK-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP1]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP2]], i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153
|
|
// CHECK-SAME: ([10 x %struct.S6]* noundef nonnull align 4 dereferenceable(40) [[H:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[H_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x %struct.S6]*, [10 x %struct.S6]** [[H_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[TMP0]], i64 0, i64 1
|
|
// CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A1]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP1]], i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
|
|
// CHECK-SAME: ([5 x i32]* noundef nonnull align 4 dereferenceable(20) [[DA:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DA_ADDR:%.*]] = alloca [5 x i32]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: store [5 x i32]* [[DA]], [5 x i32]** [[DA_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [5 x i32]*, [5 x i32]** [[DA_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[TMP0]], i64 0, i64 1
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP1]], i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_Z5tmainIiET_S0_
|
|
// CHECK-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[DA:%.*]] = alloca [5 x i32], align 4
|
|
// CHECK-NEXT: [[H:%.*]] = alloca [10 x %struct.S6], align 4
|
|
// CHECK-NEXT: [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: [[K:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: [[Z:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[AA:%.*]] = alloca [10 x i32], align 4
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
|
|
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
|
|
// CHECK-NEXT: store i32* [[I]], i32** [[J]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
|
|
// CHECK-NEXT: store i32* [[TMP1]], i32** [[K]], align 8
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[Z]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32***
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[TMP3]], align 8
|
|
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32***
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[TMP5]], align 8
|
|
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP6]], align 8
|
|
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP9]], align 4
|
|
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP10]], align 4
|
|
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP7]], i8*** [[TMP11]], align 8
|
|
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP8]], i8*** [[TMP12]], align 8
|
|
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.15, i32 0, i32 0), i64** [[TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i64** [[TMP14]], align 8
|
|
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP15]], align 8
|
|
// CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP16]], align 8
|
|
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP17]], align 8
|
|
// CHECK-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l123.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
|
// CHECK-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK: omp_offload.failed:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l123(i32** [[K]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK: omp_offload.cont:
|
|
// CHECK-NEXT: [[TMP20:%.*]] = load i32**, i32*** [[Z]], align 8
|
|
// CHECK-NEXT: store i32** [[TMP20]], i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP21:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP22:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32***
|
|
// CHECK-NEXT: store i32** [[TMP22]], i32*** [[TMP24]], align 8
|
|
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32***
|
|
// CHECK-NEXT: store i32** [[TMP22]], i32*** [[TMP26]], align 8
|
|
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP27]], align 8
|
|
// CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP30]], align 4
|
|
// CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP31]], align 4
|
|
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 8
|
|
// CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP29]], i8*** [[TMP33]], align 8
|
|
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64** [[TMP34]], align 8
|
|
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i64** [[TMP35]], align 8
|
|
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP36]], align 8
|
|
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP37]], align 8
|
|
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP38]], align 8
|
|
// CHECK-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l125.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
|
|
// CHECK-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
|
|
// CHECK-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
|
|
// CHECK: omp_offload.failed5:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l125(i32** [[TMP21]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT6]]
|
|
// CHECK: omp_offload.cont6:
|
|
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to [10 x i32]**
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[TMP42]], align 8
|
|
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to [10 x i32]**
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[TMP44]], align 8
|
|
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP45]], align 8
|
|
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP48]], align 4
|
|
// CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP49]], align 4
|
|
// CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP46]], i8*** [[TMP50]], align 8
|
|
// CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP47]], i8*** [[TMP51]], align 8
|
|
// CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64** [[TMP52]], align 8
|
|
// CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i64** [[TMP53]], align 8
|
|
// CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP54]], align 8
|
|
// CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP55]], align 8
|
|
// CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP56]], align 8
|
|
// CHECK-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l127.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
|
|
// CHECK-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
|
|
// CHECK-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
|
|
// CHECK: omp_offload.failed11:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l127([10 x i32]* [[AA]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT12]]
|
|
// CHECK: omp_offload.cont12:
|
|
// CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to [10 x %struct.S6]**
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP60]], align 8
|
|
// CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to [10 x %struct.S6]**
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP62]], align 8
|
|
// CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP63]], align 8
|
|
// CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS16:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP66]], align 4
|
|
// CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP67]], align 4
|
|
// CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP64]], i8*** [[TMP68]], align 8
|
|
// CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP65]], i8*** [[TMP69]], align 8
|
|
// CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64** [[TMP70]], align 8
|
|
// CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i64** [[TMP71]], align 8
|
|
// CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP72]], align 8
|
|
// CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP73]], align 8
|
|
// CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP74]], align 8
|
|
// CHECK-NEXT: [[TMP75:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l129.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]])
|
|
// CHECK-NEXT: [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
|
|
// CHECK-NEXT: br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
|
|
// CHECK: omp_offload.failed17:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l129([10 x %struct.S6]* [[H]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT18]]
|
|
// CHECK: omp_offload.cont18:
|
|
// CHECK-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_Z5tmainIPiET_S1_
|
|
// CHECK-SAME: (i32* noundef [[ARGC:%.*]]) #[[ATTR6]] comdat {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: [[DA:%.*]] = alloca [5 x i32*], align 8
|
|
// CHECK-NEXT: [[H:%.*]] = alloca [10 x %struct.S6], align 4
|
|
// CHECK-NEXT: [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// CHECK-NEXT: [[I:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: [[J:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[K:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[Z:%.*]] = alloca i32***, align 8
|
|
// CHECK-NEXT: [[AA:%.*]] = alloca [10 x i32*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca i32***, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
|
|
// CHECK-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast [5 x i32*]* [[DA]] to i8*
|
|
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 40, i1 false)
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
|
|
// CHECK-NEXT: store i32** [[I]], i32*** [[J]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[J]], align 8
|
|
// CHECK-NEXT: store i32** [[TMP1]], i32*** [[K]], align 8
|
|
// CHECK-NEXT: store i32*** [[K]], i32**** [[Z]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32****
|
|
// CHECK-NEXT: store i32*** [[K]], i32**** [[TMP3]], align 8
|
|
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32****
|
|
// CHECK-NEXT: store i32*** [[K]], i32**** [[TMP5]], align 8
|
|
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP6]], align 8
|
|
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP9]], align 4
|
|
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP10]], align 4
|
|
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP7]], i8*** [[TMP11]], align 8
|
|
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP8]], i8*** [[TMP12]], align 8
|
|
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.23, i32 0, i32 0), i64** [[TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.24, i32 0, i32 0), i64** [[TMP14]], align 8
|
|
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP15]], align 8
|
|
// CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP16]], align 8
|
|
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP17]], align 8
|
|
// CHECK-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l123.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
|
// CHECK-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK: omp_offload.failed:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l123(i32*** [[K]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK: omp_offload.cont:
|
|
// CHECK-NEXT: [[TMP20:%.*]] = load i32***, i32**** [[Z]], align 8
|
|
// CHECK-NEXT: store i32*** [[TMP20]], i32**** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP21:%.*]] = load i32***, i32**** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP22:%.*]] = load i32***, i32**** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32****
|
|
// CHECK-NEXT: store i32*** [[TMP22]], i32**** [[TMP24]], align 8
|
|
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32****
|
|
// CHECK-NEXT: store i32*** [[TMP22]], i32**** [[TMP26]], align 8
|
|
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP27]], align 8
|
|
// CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP30]], align 4
|
|
// CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP31]], align 4
|
|
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 8
|
|
// CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP29]], i8*** [[TMP33]], align 8
|
|
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.25, i32 0, i32 0), i64** [[TMP34]], align 8
|
|
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.26, i32 0, i32 0), i64** [[TMP35]], align 8
|
|
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP36]], align 8
|
|
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP37]], align 8
|
|
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP38]], align 8
|
|
// CHECK-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l125.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
|
|
// CHECK-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
|
|
// CHECK-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
|
|
// CHECK: omp_offload.failed5:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l125(i32*** [[TMP21]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT6]]
|
|
// CHECK: omp_offload.cont6:
|
|
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to [10 x i32*]**
|
|
// CHECK-NEXT: store [10 x i32*]* [[AA]], [10 x i32*]** [[TMP42]], align 8
|
|
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to [10 x i32*]**
|
|
// CHECK-NEXT: store [10 x i32*]* [[AA]], [10 x i32*]** [[TMP44]], align 8
|
|
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP45]], align 8
|
|
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP48]], align 4
|
|
// CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP49]], align 4
|
|
// CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP46]], i8*** [[TMP50]], align 8
|
|
// CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP47]], i8*** [[TMP51]], align 8
|
|
// CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64** [[TMP52]], align 8
|
|
// CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i64** [[TMP53]], align 8
|
|
// CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP54]], align 8
|
|
// CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP55]], align 8
|
|
// CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP56]], align 8
|
|
// CHECK-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l127.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
|
|
// CHECK-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
|
|
// CHECK-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
|
|
// CHECK: omp_offload.failed11:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l127([10 x i32*]* [[AA]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT12]]
|
|
// CHECK: omp_offload.cont12:
|
|
// CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to [10 x %struct.S6]**
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP60]], align 8
|
|
// CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to [10 x %struct.S6]**
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP62]], align 8
|
|
// CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP63]], align 8
|
|
// CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS16:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP66]], align 4
|
|
// CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP67]], align 4
|
|
// CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP64]], i8*** [[TMP68]], align 8
|
|
// CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP65]], i8*** [[TMP69]], align 8
|
|
// CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.29, i32 0, i32 0), i64** [[TMP70]], align 8
|
|
// CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.30, i32 0, i32 0), i64** [[TMP71]], align 8
|
|
// CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP72]], align 8
|
|
// CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP73]], align 8
|
|
// CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP74]], align 8
|
|
// CHECK-NEXT: [[TMP75:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l129.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]])
|
|
// CHECK-NEXT: [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
|
|
// CHECK-NEXT: br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
|
|
// CHECK: omp_offload.failed17:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l129([10 x %struct.S6]* [[H]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT18]]
|
|
// CHECK: omp_offload.cont18:
|
|
// CHECK-NEXT: ret i32* null
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l123
|
|
// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[K:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[K_ADDR:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: store i32** [[K]], i32*** [[K_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[K_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 8
|
|
// CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 1
|
|
// CHECK-NEXT: store i32* [[INCDEC_PTR]], i32** [[TMP0]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l125
|
|
// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[Z:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca i32**, align 8
|
|
// CHECK-NEXT: store i32** [[Z]], i32*** [[Z_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[Z_ADDR]], align 8
|
|
// CHECK-NEXT: store i32** [[TMP0]], i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
|
|
// CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
|
|
// CHECK-NEXT: store i32* [[INCDEC_PTR]], i32** [[TMP1]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l127
|
|
// CHECK-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[AA:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[AA_ADDR:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[AA_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[AA_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP1]], i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l129
|
|
// CHECK-SAME: ([10 x %struct.S6]* noundef nonnull align 4 dereferenceable(40) [[H:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[H_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x %struct.S6]*, [10 x %struct.S6]** [[H_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[TMP0]], i64 0, i64 0
|
|
// CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A1]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP1]], i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l123
|
|
// CHECK-SAME: (i32*** noundef nonnull align 8 dereferenceable(8) [[K:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[K_ADDR:%.*]] = alloca i32***, align 8
|
|
// CHECK-NEXT: store i32*** [[K]], i32**** [[K_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32***, i32**** [[K_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[TMP0]], align 8
|
|
// CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32*, i32** [[TMP1]], i32 1
|
|
// CHECK-NEXT: store i32** [[INCDEC_PTR]], i32*** [[TMP0]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l125
|
|
// CHECK-SAME: (i32*** noundef nonnull align 8 dereferenceable(8) [[Z:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32***, align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca i32***, align 8
|
|
// CHECK-NEXT: store i32*** [[Z]], i32**** [[Z_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32***, i32**** [[Z_ADDR]], align 8
|
|
// CHECK-NEXT: store i32*** [[TMP0]], i32**** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32***, i32**** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
|
|
// CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32*, i32** [[TMP2]], i32 1
|
|
// CHECK-NEXT: store i32** [[INCDEC_PTR]], i32*** [[TMP1]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l127
|
|
// CHECK-SAME: ([10 x i32*]* noundef nonnull align 8 dereferenceable(80) [[AA:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[AA_ADDR:%.*]] = alloca [10 x i32*]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32*, align 8
|
|
// CHECK-NEXT: store [10 x i32*]* [[AA]], [10 x i32*]** [[AA_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x i32*]*, [10 x i32*]** [[AA_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32*], [10 x i32*]* [[TMP0]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[ARRAYIDX]], align 8
|
|
// CHECK-NEXT: store i32* [[TMP1]], i32** [[A]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l129
|
|
// CHECK-SAME: ([10 x %struct.S6]* noundef nonnull align 4 dereferenceable(40) [[H:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[H_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x %struct.S6]*, [10 x %struct.S6]** [[H_ADDR]], align 8
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[TMP0]], i64 0, i64 0
|
|
// CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A1]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP1]], i32* [[A]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_Z12use_templatev
|
|
// CHECK-SAME: () #[[ATTR6]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[AKERN:%.*]] = alloca [[STRUCT_SOMEKERNEL:%.*]], align 4
|
|
// CHECK-NEXT: call void @_ZN10SomeKernelC1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
|
|
// CHECK-NEXT: call void @_ZN10SomeKernel5applyILj32EEEvv(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
|
|
// CHECK-NEXT: call void @_ZN10SomeKernelD1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]]) #[[ATTR5]]
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN10SomeKernel5applyILj32EEEvv
|
|
// CHECK-SAME: (%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR6]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SomeKernel*, align 8
|
|
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
|
|
// CHECK-NEXT: store %struct.SomeKernel* [[THIS]], %struct.SomeKernel** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.SomeKernel*, %struct.SomeKernel** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[TARGETDEV:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL:%.*]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[TARGETDEV]], align 4
|
|
// CHECK-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-NEXT: [[DEVPTR:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TARGETDEV2:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, float* [[DEVPTR]], i32 1
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[TARGETDEV2]] to i8*
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP1]] to i8*
|
|
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
|
|
// CHECK-NEXT: [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
|
|
// CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
|
|
// CHECK-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
|
|
// CHECK-NEXT: [[TMP8:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
|
|
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP8]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes.31 to i8*), i64 24, i1 false)
|
|
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SomeKernel**
|
|
// CHECK-NEXT: store %struct.SomeKernel* [[THIS1]], %struct.SomeKernel** [[TMP10]], align 8
|
|
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32**
|
|
// CHECK-NEXT: store i32* [[TARGETDEV2]], i32** [[TMP12]], align 8
|
|
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
|
|
// CHECK-NEXT: store i64 [[TMP7]], i64* [[TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP14]], align 8
|
|
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.SomeKernel**
|
|
// CHECK-NEXT: store %struct.SomeKernel* [[THIS1]], %struct.SomeKernel** [[TMP16]], align 8
|
|
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to float**
|
|
// CHECK-NEXT: store float* [[DEVPTR]], float** [[TMP18]], align 8
|
|
// CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP19]], align 8
|
|
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.SomeKernel**
|
|
// CHECK-NEXT: store %struct.SomeKernel* [[THIS1]], %struct.SomeKernel** [[TMP21]], align 8
|
|
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
|
|
// CHECK-NEXT: store i32* [[TARGETDEV2]], i32** [[TMP23]], align 8
|
|
// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP24]], align 8
|
|
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
|
|
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP30]], align 4
|
|
// CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 3, i32* [[TMP31]], align 4
|
|
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP25]], i8*** [[TMP32]], align 8
|
|
// CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP26]], i8*** [[TMP33]], align 8
|
|
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* [[TMP27]], i64** [[TMP34]], align 8
|
|
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.32, i32 0, i32 0), i64** [[TMP35]], align 8
|
|
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP36]], align 8
|
|
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP37]], align 8
|
|
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP38]], align 8
|
|
// CHECK-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 [[TMP29]], i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN10SomeKernel5applyILj32EEEvv_l168.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
|
// CHECK-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
|
|
// CHECK-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK: omp_offload.failed:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN10SomeKernel5applyILj32EEEvv_l168(%struct.SomeKernel* [[THIS1]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK: omp_offload.cont:
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN10SomeKernel5applyILj32EEEvv_l168
|
|
// CHECK-SAME: (%struct.SomeKernel* noundef [[THIS:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SomeKernel*, align 8
|
|
// CHECK-NEXT: store %struct.SomeKernel* [[THIS]], %struct.SomeKernel** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.SomeKernel*, %struct.SomeKernel** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[DEVPTR:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL:%.*]], %struct.SomeKernel* [[TMP0]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[DEVPTR]], align 4
|
|
// CHECK-NEXT: [[INC:%.*]] = fadd float [[TMP1]], 1.000000e+00
|
|
// CHECK-NEXT: store float [[INC]], float* [[DEVPTR]], align 4
|
|
// CHECK-NEXT: [[TARGETDEV:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[TMP0]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TARGETDEV]], align 4
|
|
// CHECK-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
|
|
// CHECK-NEXT: store i32 [[INC1]], i32* [[TARGETDEV]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_has_device_addr_codegen.cpp
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: call void @__cxx_global_var_init()
|
|
// CHECK-NEXT: call void @__cxx_global_var_init.1()
|
|
// CHECK-NEXT: call void @__cxx_global_var_init.2()
|
|
// CHECK-NEXT: call void @__cxx_global_var_init.3()
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@__tls_init
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* @__tls_guard, align 1
|
|
// CHECK-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
|
|
// CHECK-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !prof [[PROF18:![0-9]+]]
|
|
// CHECK: init:
|
|
// CHECK-NEXT: store i8 1, i8* @__tls_guard, align 1
|
|
// CHECK-NEXT: call void @__cxx_global_var_init.4()
|
|
// CHECK-NEXT: br label [[EXIT]]
|
|
// CHECK: exit:
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZTW1h
|
|
// CHECK-SAME: () #[[ATTR10:[0-9]+]] comdat {
|
|
// CHECK-NEXT: call void @_ZTH1h()
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 %class.S3* @llvm.threadlocal.address.p0s_class.S3s(%class.S3* align 4 @h)
|
|
// CHECK-NEXT: ret %class.S3* [[TMP1]]
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
|
|
// CHECK-SAME: () #[[ATTR0]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: call void @__tgt_register_requires(i64 1)
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) @_ZL1b)
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S2C1Ev
|
|
// SIMD-ONLY0-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
|
|
// SIMD-ONLY0-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
|
|
// SIMD-ONLY0: arrayctor.loop:
|
|
// SIMD-ONLY0-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
|
|
// SIMD-ONLY0-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAYCTOR_CUR]], i64 1
|
|
// SIMD-ONLY0-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i64 1, i64 0)
|
|
// SIMD-ONLY0-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
|
|
// SIMD-ONLY0: arrayctor.cont:
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @_ZL1c)
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S3C1Ev
|
|
// SIMD-ONLY0-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
|
|
// SIMD-ONLY0-NEXT: store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S3C2Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
|
|
// SIMD-ONLY0: arrayctor.loop:
|
|
// SIMD-ONLY0-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S3* [ getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
|
|
// SIMD-ONLY0-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[ARRAYCTOR_CUR]], i64 1
|
|
// SIMD-ONLY0-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S3* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i64 1, i64 0)
|
|
// SIMD-ONLY0-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
|
|
// SIMD-ONLY0: arrayctor.cont:
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @h)
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@main
|
|
// SIMD-ONLY0-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR2:[0-9]+]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
|
|
// SIMD-ONLY0-NEXT: [[DA:%.*]] = alloca [5 x i32], align 4
|
|
// SIMD-ONLY0-NEXT: [[H:%.*]] = alloca [10 x %struct.S6], align 4
|
|
// SIMD-ONLY0-NEXT: [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// SIMD-ONLY0-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[J:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[K:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[Z:%.*]] = alloca i32**, align 8
|
|
// SIMD-ONLY0-NEXT: [[AA:%.*]] = alloca [10 x i32], align 4
|
|
// SIMD-ONLY0-NEXT: [[RAA:%.*]] = alloca [10 x i32]*, align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca i32**, align 8
|
|
// SIMD-ONLY0-NEXT: [[_TMP2:%.*]] = alloca [10 x i32]*, align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[A4:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[A7:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
|
|
// SIMD-ONLY0-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
|
|
// SIMD-ONLY0-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
|
|
// SIMD-ONLY0-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32* [[I]], i32** [[J]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32* [[TMP1]], i32** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32** [[K]], i32*** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: store [10 x i32]* [[AA]], [10 x i32]** [[RAA]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32*, i32** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
|
|
// SIMD-ONLY0-NEXT: store i32* [[INCDEC_PTR]], i32** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32** [[TMP3]], i32*** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 1
|
|
// SIMD-ONLY0-NEXT: store i32* [[INCDEC_PTR1]], i32** [[TMP5]], align 8
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[AA]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: store i32 1, i32* [[ARRAYIDX]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA]], align 8
|
|
// SIMD-ONLY0-NEXT: store [10 x i32]* [[TMP7]], [10 x i32]** [[_TMP2]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP9:%.*]] = load [10 x i32]*, [10 x i32]** [[_TMP2]], align 8
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP9]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX3]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP10]], i32* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[H]], i64 0, i64 1
|
|
// SIMD-ONLY0-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX5]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: [[TMP11:%.*]] = load i32, i32* [[A6]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP11]], i32* [[A4]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[DA]], i64 0, i64 1
|
|
// SIMD-ONLY0-NEXT: [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP12]], i32* [[A7]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP13:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
|
|
// SIMD-ONLY0-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_S0_(i32 noundef signext [[TMP13]])
|
|
// SIMD-ONLY0-NEXT: [[CALL9:%.*]] = call noundef i32* @_Z5tmainIPiET_S1_(i32* noundef [[ARGC_ADDR]])
|
|
// SIMD-ONLY0-NEXT: [[TMP14:%.*]] = load i32, i32* [[CALL9]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[TMP14]]
|
|
// SIMD-ONLY0-NEXT: ret i32 [[ADD]]
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z5tmainIiET_S0_
|
|
// SIMD-ONLY0-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[DA:%.*]] = alloca [5 x i32], align 4
|
|
// SIMD-ONLY0-NEXT: [[H:%.*]] = alloca [10 x %struct.S6], align 4
|
|
// SIMD-ONLY0-NEXT: [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// SIMD-ONLY0-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[J:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[K:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[Z:%.*]] = alloca i32**, align 8
|
|
// SIMD-ONLY0-NEXT: [[AA:%.*]] = alloca [10 x i32], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca i32**, align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[A2:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
|
|
// SIMD-ONLY0-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
|
|
// SIMD-ONLY0-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32* [[I]], i32** [[J]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32* [[TMP1]], i32** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32** [[K]], i32*** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32*, i32** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
|
|
// SIMD-ONLY0-NEXT: store i32* [[INCDEC_PTR]], i32** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32** [[TMP3]], i32*** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 1
|
|
// SIMD-ONLY0-NEXT: store i32* [[INCDEC_PTR1]], i32** [[TMP5]], align 8
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[AA]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP7]], i32* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[H]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX3]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load i32, i32* [[A4]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP8]], i32* [[A2]], align 4
|
|
// SIMD-ONLY0-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z5tmainIPiET_S1_
|
|
// SIMD-ONLY0-SAME: (i32* noundef [[ARGC:%.*]]) #[[ATTR4]] comdat {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[DA:%.*]] = alloca [5 x i32*], align 8
|
|
// SIMD-ONLY0-NEXT: [[H:%.*]] = alloca [10 x %struct.S6], align 4
|
|
// SIMD-ONLY0-NEXT: [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
|
|
// SIMD-ONLY0-NEXT: [[I:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[J:%.*]] = alloca i32**, align 8
|
|
// SIMD-ONLY0-NEXT: [[K:%.*]] = alloca i32**, align 8
|
|
// SIMD-ONLY0-NEXT: [[Z:%.*]] = alloca i32***, align 8
|
|
// SIMD-ONLY0-NEXT: [[AA:%.*]] = alloca [10 x i32*], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca i32***, align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca i32*, align 8
|
|
// SIMD-ONLY0-NEXT: [[A2:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = bitcast [5 x i32*]* [[DA]] to i8*
|
|
// SIMD-ONLY0-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 40, i1 false)
|
|
// SIMD-ONLY0-NEXT: store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32** [[I]], i32*** [[J]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[J]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32** [[TMP1]], i32*** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32*** [[K]], i32**** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32*, i32** [[TMP2]], i32 1
|
|
// SIMD-ONLY0-NEXT: store i32** [[INCDEC_PTR]], i32*** [[K]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32***, i32**** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32*** [[TMP3]], i32**** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32***, i32**** [[Z]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32***, i32**** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load i32**, i32*** [[TMP5]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32*, i32** [[TMP6]], i32 1
|
|
// SIMD-ONLY0-NEXT: store i32** [[INCDEC_PTR1]], i32*** [[TMP5]], align 8
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32*], [10 x i32*]* [[AA]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load i32*, i32** [[ARRAYIDX]], align 8
|
|
// SIMD-ONLY0-NEXT: store i32* [[TMP7]], i32** [[A]], align 8
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[H]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX3]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load i32, i32* [[A4]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP8]], i32* [[A2]], align 4
|
|
// SIMD-ONLY0-NEXT: ret i32* null
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z12use_templatev
|
|
// SIMD-ONLY0-SAME: () #[[ATTR4]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[AKERN:%.*]] = alloca [[STRUCT_SOMEKERNEL:%.*]], align 4
|
|
// SIMD-ONLY0-NEXT: call void @_ZN10SomeKernelC1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
|
|
// SIMD-ONLY0-NEXT: call void @_ZN10SomeKernel5applyILj32EEEvv(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
|
|
// SIMD-ONLY0-NEXT: call void @_ZN10SomeKernelD1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]]) #[[ATTR7:[0-9]+]]
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN10SomeKernel5applyILj32EEEvv
|
|
// SIMD-ONLY0-SAME: (%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR4]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SomeKernel*, align 8
|
|
// SIMD-ONLY0-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: store %struct.SomeKernel* [[THIS]], %struct.SomeKernel** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.SomeKernel*, %struct.SomeKernel** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TARGETDEV:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL:%.*]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, i32* [[TARGETDEV]], align 4
|
|
// SIMD-ONLY0-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
|
|
// SIMD-ONLY0-NEXT: [[DEVPTR:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 1
|
|
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load float, float* [[DEVPTR]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC:%.*]] = fadd float [[TMP1]], 1.000000e+00
|
|
// SIMD-ONLY0-NEXT: store float [[INC]], float* [[DEVPTR]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TARGETDEV2:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, i32* [[TARGETDEV2]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP2]], 1
|
|
// SIMD-ONLY0-NEXT: store i32 [[INC3]], i32* [[TARGETDEV2]], align 4
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S2C2Ev
|
|
// SIMD-ONLY0-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
|
|
// SIMD-ONLY0-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: store i32 0, i32* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S3C2Ev
|
|
// SIMD-ONLY0-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
|
|
// SIMD-ONLY0-NEXT: store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: store i32 0, i32* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_has_device_addr_codegen.cpp
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init()
|
|
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.1()
|
|
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.2()
|
|
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.3()
|
|
// SIMD-ONLY0-NEXT: call void @__cxx_global_var_init.4()
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|