// (extraction metadata: 467 lines, 33 KiB, C++)
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _
|
|
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
|
|
|
|
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
|
|
// expected-no-diagnostics
|
|
|
|
// Exercises the OpenMP 5.1 'has_device_addr' clause on non-static data
// members inside a member function: a scalar member, a reference member,
// a pointer section, and an array section whose length is a member.
struct S {
  int a = 0;
  // Default member initializers make ptr and ref alias 'a'.
  int *ptr = &a;
  int &ref = a;
  int arr[4];
  S() {}
  void foo() {
// 'has_device_addr' asserts the listed items already have device
// addresses; the target region then increments each one through the
// corresponding access form (direct, deref, reference, subscript).
#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:a])
    ++a, ++*ptr, ++ref, ++arr[0];
  }
};
|
|
|
|
// Mirrors S::foo at file scope with float objects, adding a VLA element
// to the 'has_device_addr' list. The generated offload kernel and
// mapping arrays are pinned by the autogenerated CHECK lines below.
int main() {
  float a = 0;
  float *ptr = &a;
  float &ref = a;
  float arr[4];
  // VLA sized by the runtime value of 'a' (zero here); its extent and
  // base pointer are both passed to the target kernel.
  float vla[(int)a];
  S s;
  s.foo();
#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:(int)a], vla[0])
  ++a, ++*ptr, ++ref, ++arr[0], ++vla[0];
  return a;
}
|
|
|
|
|
|
// CHECK-LABEL: define {{[^@]+}}@main
|
|
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// CHECK-NEXT: [[A:%.*]] = alloca float, align 4
|
|
// CHECK-NEXT: [[PTR:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[REF:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[ARR:%.*]] = alloca [4 x float], align 4
|
|
// CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
|
|
// CHECK-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
// CHECK-NEXT: store float 0.000000e+00, float* [[A]], align 4
|
|
// CHECK-NEXT: store float* [[A]], float** [[PTR]], align 8
|
|
// CHECK-NEXT: store float* [[A]], float** [[REF]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[A]], align 4
|
|
// CHECK-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
|
|
// CHECK-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK-NEXT: call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
|
// CHECK-NEXT: call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load float*, float** [[REF]], align 8
|
|
// CHECK-NEXT: store float* [[TMP3]], float** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load float*, float** [[PTR]], align 8
|
|
// CHECK-NEXT: [[TMP5:%.*]] = load float*, float** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
|
|
// CHECK-NEXT: [[TMP7:%.*]] = load float*, float** [[TMP]], align 8
|
|
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to float**
|
|
// CHECK-NEXT: store float* [[A]], float** [[TMP9]], align 8
|
|
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to float**
|
|
// CHECK-NEXT: store float* [[A]], float** [[TMP11]], align 8
|
|
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP12]], align 8
|
|
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to float**
|
|
// CHECK-NEXT: store float* [[TMP6]], float** [[TMP14]], align 8
|
|
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float**
|
|
// CHECK-NEXT: store float* [[TMP6]], float** [[TMP16]], align 8
|
|
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP17]], align 8
|
|
// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to float**
|
|
// CHECK-NEXT: store float* [[TMP7]], float** [[TMP19]], align 8
|
|
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to float**
|
|
// CHECK-NEXT: store float* [[TMP7]], float** [[TMP21]], align 8
|
|
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP22]], align 8
|
|
// CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
|
|
// CHECK-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to float**
|
|
// CHECK-NEXT: store float* [[ARRAYDECAY]], float** [[TMP24]], align 8
|
|
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
|
|
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to float**
|
|
// CHECK-NEXT: store float* [[ARRAYDECAY]], float** [[TMP26]], align 8
|
|
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP27]], align 8
|
|
// CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
|
|
// CHECK-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
|
|
// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP29]], align 8
|
|
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
|
|
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
|
|
// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP31]], align 8
|
|
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP32]], align 8
|
|
// CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
|
|
// CHECK-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to float**
|
|
// CHECK-NEXT: store float* [[VLA]], float** [[TMP34]], align 8
|
|
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
|
|
// CHECK-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to float**
|
|
// CHECK-NEXT: store float* [[VLA]], float** [[TMP36]], align 8
|
|
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP37]], align 8
|
|
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP40]], align 4
|
|
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 6, i32* [[TMP41]], align 4
|
|
// CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP38]], i8*** [[TMP42]], align 8
|
|
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP39]], i8*** [[TMP43]], align 8
|
|
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP44]], align 8
|
|
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP45]], align 8
|
|
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP46]], align 8
|
|
// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP47]], align 8
|
|
// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP48]], align 8
|
|
// CHECK-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
|
// CHECK-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0
|
|
// CHECK-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK: omp_offload.failed:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27(float* [[A]], float* [[TMP4]], float* [[TMP5]], [4 x float]* [[ARR]], i64 [[TMP1]], float* [[VLA]]) #[[ATTR5:[0-9]+]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK: omp_offload.cont:
|
|
// CHECK-NEXT: [[TMP51:%.*]] = load float, float* [[A]], align 4
|
|
// CHECK-NEXT: [[CONV1:%.*]] = fptosi float [[TMP51]] to i32
|
|
// CHECK-NEXT: store i32 [[CONV1]], i32* [[RETVAL]], align 4
|
|
// CHECK-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK-NEXT: call void @llvm.stackrestore(i8* [[TMP52]])
|
|
// CHECK-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4
|
|
// CHECK-NEXT: ret i32 [[TMP53]]
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN1SC1Ev
|
|
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN1S3fooEv
|
|
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
|
|
// CHECK-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
|
|
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[REF]], align 8
|
|
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
|
// CHECK-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
|
|
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i32], [4 x i32]* [[ARR]], i32 1
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[A]] to i8*
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast [4 x i32]* [[TMP1]] to i8*
|
|
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
|
|
// CHECK-NEXT: [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
|
|
// CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
|
|
// CHECK-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
|
|
// CHECK-NEXT: [[TMP8:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
|
|
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP8]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.1 to i8*), i64 40, i1 false)
|
|
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.S**
|
|
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP10]], align 8
|
|
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32**
|
|
// CHECK-NEXT: store i32* [[A]], i32** [[TMP12]], align 8
|
|
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
|
|
// CHECK-NEXT: store i64 [[TMP7]], i64* [[TMP13]], align 8
|
|
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP14]], align 8
|
|
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.S**
|
|
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP16]], align 8
|
|
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
|
|
// CHECK-NEXT: store i32* [[A]], i32** [[TMP18]], align 8
|
|
// CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP19]], align 8
|
|
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S**
|
|
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP21]], align 8
|
|
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
|
|
// CHECK-NEXT: store i32* [[TMP0]], i32** [[TMP23]], align 8
|
|
// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP24]], align 8
|
|
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
|
|
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.S**
|
|
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP26]], align 8
|
|
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
|
|
// CHECK-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32***
|
|
// CHECK-NEXT: store i32** [[PTR]], i32*** [[TMP28]], align 8
|
|
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP29]], align 8
|
|
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
|
|
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to %struct.S**
|
|
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP31]], align 8
|
|
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
|
|
// CHECK-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to [4 x i32]**
|
|
// CHECK-NEXT: store [4 x i32]* [[ARR]], [4 x i32]** [[TMP33]], align 8
|
|
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
|
|
// CHECK-NEXT: store i8* null, i8** [[TMP34]], align 8
|
|
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
|
|
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 1, i32* [[TMP38]], align 4
|
|
// CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK-NEXT: store i32 5, i32* [[TMP39]], align 4
|
|
// CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK-NEXT: store i8** [[TMP35]], i8*** [[TMP40]], align 8
|
|
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK-NEXT: store i8** [[TMP36]], i8*** [[TMP41]], align 8
|
|
// CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK-NEXT: store i64* [[TMP37]], i64** [[TMP42]], align 8
|
|
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP43]], align 8
|
|
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP44]], align 8
|
|
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK-NEXT: store i8** null, i8*** [[TMP45]], align 8
|
|
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK-NEXT: store i64 0, i64* [[TMP46]], align 8
|
|
// CHECK-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
|
// CHECK-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0
|
|
// CHECK-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK: omp_offload.failed:
|
|
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14(%struct.S* [[THIS1]]) #[[ATTR5]]
|
|
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK: omp_offload.cont:
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
|
|
// CHECK-SAME: (float* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], float* noundef [[PTR:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[REF:%.*]], [4 x float]* noundef nonnull align 4 dereferenceable(16) [[ARR:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[VLA1:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[REF_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[ARR_ADDR:%.*]] = alloca [4 x float]*, align 8
|
|
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-NEXT: [[VLA_ADDR2:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: [[TMP:%.*]] = alloca float*, align 8
|
|
// CHECK-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK-NEXT: store float* [[PTR]], float** [[PTR_ADDR]], align 8
|
|
// CHECK-NEXT: store float* [[REF]], float** [[REF_ADDR]], align 8
|
|
// CHECK-NEXT: store [4 x float]* [[ARR]], [4 x float]** [[ARR_ADDR]], align 8
|
|
// CHECK-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
|
// CHECK-NEXT: store float* [[VLA1]], float** [[VLA_ADDR2]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load float*, float** [[A_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[REF_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load [4 x float]*, [4 x float]** [[ARR_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load float*, float** [[VLA_ADDR2]], align 8
|
|
// CHECK-NEXT: store float* [[TMP1]], float** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[TMP0]], align 4
|
|
// CHECK-NEXT: [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
|
|
// CHECK-NEXT: store float [[INC]], float* [[TMP0]], align 4
|
|
// CHECK-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
|
|
// CHECK-NEXT: [[INC3:%.*]] = fadd float [[TMP7]], 1.000000e+00
|
|
// CHECK-NEXT: store float [[INC3]], float* [[TMP6]], align 4
|
|
// CHECK-NEXT: [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
|
|
// CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
|
|
// CHECK-NEXT: [[INC4:%.*]] = fadd float [[TMP9]], 1.000000e+00
|
|
// CHECK-NEXT: store float [[INC4]], float* [[TMP8]], align 4
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[TMP2]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK-NEXT: [[INC5:%.*]] = fadd float [[TMP10]], 1.000000e+00
|
|
// CHECK-NEXT: store float [[INC5]], float* [[ARRAYIDX]], align 4
|
|
// CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 0
|
|
// CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK-NEXT: [[INC7:%.*]] = fadd float [[TMP11]], 1.000000e+00
|
|
// CHECK-NEXT: store float [[INC7]], float* [[ARRAYIDX6]], align 4
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@_ZN1SC2Ev
|
|
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32 0, i32* [[A]], align 8
|
|
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
|
// CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32* [[A2]], i32** [[PTR]], align 8
|
|
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
|
// CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// CHECK-NEXT: store i32* [[A3]], i32** [[REF]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14
|
|
// CHECK-SAME: (%struct.S* noundef [[THIS:%.*]]) #[[ATTR4]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP0]], i32 0, i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 8
|
|
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
|
|
// CHECK-NEXT: store i32 [[INC]], i32* [[A]], align 8
|
|
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 1
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[PTR]], align 8
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
|
|
// CHECK-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP3]], 1
|
|
// CHECK-NEXT: store i32 [[INC1]], i32* [[TMP2]], align 4
|
|
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 2
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[REF]], align 8
|
|
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP5]], 1
|
|
// CHECK-NEXT: store i32 [[INC2]], i32* [[TMP4]], align 4
|
|
// CHECK-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 3
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
|
|
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
|
|
// CHECK-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP6]], 1
|
|
// CHECK-NEXT: store i32 [[INC3]], i32* [[ARRAYIDX]], align 8
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
|
|
// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: call void @__tgt_register_requires(i64 1)
|
|
// CHECK-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@main
|
|
// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca float, align 4
|
|
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = alloca float*, align 8
|
|
// SIMD-ONLY0-NEXT: [[REF:%.*]] = alloca float*, align 8
|
|
// SIMD-ONLY0-NEXT: [[ARR:%.*]] = alloca [4 x float], align 4
|
|
// SIMD-ONLY0-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// SIMD-ONLY0-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// SIMD-ONLY0-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca float*, align 8
|
|
// SIMD-ONLY0-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
// SIMD-ONLY0-NEXT: store float 0.000000e+00, float* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: store float* [[A]], float** [[PTR]], align 8
|
|
// SIMD-ONLY0-NEXT: store float* [[A]], float** [[REF]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load float, float* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
|
|
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
|
|
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// SIMD-ONLY0-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// SIMD-ONLY0-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
|
|
// SIMD-ONLY0-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// SIMD-ONLY0-NEXT: call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
|
// SIMD-ONLY0-NEXT: call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
|
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load float*, float** [[REF]], align 8
|
|
// SIMD-ONLY0-NEXT: store float* [[TMP3]], float** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load float*, float** [[REF]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load float, float* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
|
|
// SIMD-ONLY0-NEXT: store float [[INC]], float* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC1:%.*]] = fadd float [[TMP7]], 1.000000e+00
|
|
// SIMD-ONLY0-NEXT: store float [[INC1]], float* [[TMP6]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC2:%.*]] = fadd float [[TMP9]], 1.000000e+00
|
|
// SIMD-ONLY0-NEXT: store float [[INC2]], float* [[TMP8]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = fadd float [[TMP10]], 1.000000e+00
|
|
// SIMD-ONLY0-NEXT: store float [[INC3]], float* [[ARRAYIDX]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 0
|
|
// SIMD-ONLY0-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC5:%.*]] = fadd float [[TMP11]], 1.000000e+00
|
|
// SIMD-ONLY0-NEXT: store float [[INC5]], float* [[ARRAYIDX4]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP12:%.*]] = load float, float* [[A]], align 4
|
|
// SIMD-ONLY0-NEXT: [[CONV6:%.*]] = fptosi float [[TMP12]] to i32
|
|
// SIMD-ONLY0-NEXT: store i32 [[CONV6]], i32* [[RETVAL]], align 4
|
|
// SIMD-ONLY0-NEXT: [[TMP13:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// SIMD-ONLY0-NEXT: call void @llvm.stackrestore(i8* [[TMP13]])
|
|
// SIMD-ONLY0-NEXT: [[TMP14:%.*]] = load i32, i32* [[RETVAL]], align 4
|
|
// SIMD-ONLY0-NEXT: ret i32 [[TMP14]]
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC1Ev
|
|
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1S3fooEv
|
|
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
|
|
// SIMD-ONLY0-NEXT: store i32 [[INC]], i32* [[A]], align 8
|
|
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
|
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32*, i32** [[PTR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP2]], 1
|
|
// SIMD-ONLY0-NEXT: store i32 [[INC2]], i32* [[TMP1]], align 4
|
|
// SIMD-ONLY0-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
|
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32*, i32** [[REF]], align 8
|
|
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP4]], 1
|
|
// SIMD-ONLY0-NEXT: store i32 [[INC3]], i32* [[TMP3]], align 4
|
|
// SIMD-ONLY0-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
|
|
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
|
|
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
|
|
// SIMD-ONLY0-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP5]], 1
|
|
// SIMD-ONLY0-NEXT: store i32 [[INC4]], i32* [[ARRAYIDX]], align 8
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|
|
//
|
|
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC2Ev
|
|
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
|
|
// SIMD-ONLY0-NEXT: entry:
|
|
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: store i32 0, i32* [[A]], align 8
|
|
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
|
// SIMD-ONLY0-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: store i32* [[A2]], i32** [[PTR]], align 8
|
|
// SIMD-ONLY0-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
|
// SIMD-ONLY0-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
// SIMD-ONLY0-NEXT: store i32* [[A3]], i32** [[REF]], align 8
|
|
// SIMD-ONLY0-NEXT: ret void
|
|
//
|