// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbp -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV64ZBP

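// Test that each Zbp builtin is lowered to the corresponding
// llvm.riscv.* intrinsic (grev, gorc, shfl, unshfl, xperm).
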
// RV64ZBP-LABEL: @grev(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.grev.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long grev(long rs1, long rs2)
{
  return __builtin_riscv_grev_64(rs1, rs2);
}

// RV64ZBP-LABEL: @grevi(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.grev.i64(i64 [[TMP0]], i64 13)
// RV64ZBP-NEXT:    ret i64 [[TMP1]]
//
long grevi(long rs1)
{
  const int i = 13;
  return __builtin_riscv_grev_64(rs1, i);
}

// RV64ZBP-LABEL: @grevw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV64ZBP-NEXT:    ret i32 [[TMP2]]
//
int grevw(int rs1, int rs2)
{
  return __builtin_riscv_grev_32(rs1, rs2);
}

// RV64ZBP-LABEL: @greviw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 13)
// RV64ZBP-NEXT:    ret i32 [[TMP1]]
//
int greviw(int rs1)
{
  const int i = 13;
  return __builtin_riscv_grev_32(rs1, i);
}

// RV64ZBP-LABEL: @gorc(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.gorc.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long gorc(long rs1, long rs2)
{
  return __builtin_riscv_gorc_64(rs1, rs2);
}

// RV64ZBP-LABEL: @gorci(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.gorc.i64(i64 [[TMP0]], i64 13)
// RV64ZBP-NEXT:    ret i64 [[TMP1]]
//
long gorci(long rs1)
{
  const int i = 13;
  return __builtin_riscv_gorc_64(rs1, i);
}

// RV64ZBP-LABEL: @gorcw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV64ZBP-NEXT:    ret i32 [[TMP2]]
//
int gorcw(int rs1, int rs2)
{
  return __builtin_riscv_gorc_32(rs1, rs2);
}

// RV64ZBP-LABEL: @gorciw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 13)
// RV64ZBP-NEXT:    ret i32 [[TMP1]]
//
int gorciw(int rs1)
{
  const int i = 13;
  return __builtin_riscv_gorc_32(rs1, i);
}

// RV64ZBP-LABEL: @shfl(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.shfl.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long shfl(long rs1, long rs2)
{
  return __builtin_riscv_shfl_64(rs1, rs2);
}

// RV64ZBP-LABEL: @shfli(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.shfl.i64(i64 [[TMP0]], i64 13)
// RV64ZBP-NEXT:    ret i64 [[TMP1]]
//
long shfli(long rs1)
{
  const int i = 13;
  return __builtin_riscv_shfl_64(rs1, i);
}

// RV64ZBP-LABEL: @shflw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV64ZBP-NEXT:    ret i32 [[TMP2]]
//
int shflw(int rs1, int rs2)
{
  return __builtin_riscv_shfl_32(rs1, rs2);
}

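// Note: named _NOw because (presumably) the draft Zbp spec defines no
// shfliw instruction, so the immediate form of the 32-bit shuffle is
// expected to stay on the plain i32 shfl intrinsic.
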
// RV64ZBP-LABEL: @shfli_NOw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 13)
// RV64ZBP-NEXT:    ret i32 [[TMP1]]
//
int shfli_NOw(int rs1)
{
  const int i = 13;
  return __builtin_riscv_shfl_32(rs1, i);
}

// RV64ZBP-LABEL: @unshfl(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.unshfl.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long unshfl(long rs1, long rs2)
{
  return __builtin_riscv_unshfl_64(rs1, rs2);
}

// RV64ZBP-LABEL: @unshfli(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.unshfl.i64(i64 [[TMP0]], i64 13)
// RV64ZBP-NEXT:    ret i64 [[TMP1]]
//
long unshfli(long rs1)
{
  const int i = 13;
  return __builtin_riscv_unshfl_64(rs1, i);
}

// RV64ZBP-LABEL: @unshflw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV64ZBP-NEXT:    ret i32 [[TMP2]]
//
int unshflw(int rs1, int rs2)
{
  return __builtin_riscv_unshfl_32(rs1, rs2);
}

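// Note: as with shfli_NOw above, there appears to be no unshfliw
// instruction, so the immediate form stays on the i32 unshfl intrinsic.
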
// RV64ZBP-LABEL: @unshfli_NOw(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 13)
// RV64ZBP-NEXT:    ret i32 [[TMP1]]
//
int unshfli_NOw(int rs1)
{
  const int i = 13;
  return __builtin_riscv_unshfl_32(rs1, i);
}

// RV64ZBP-LABEL: @xperm_n(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.n.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long xperm_n(long rs1, long rs2)
{
  return __builtin_riscv_xperm_n(rs1, rs2);
}

// RV64ZBP-LABEL: @xperm_b(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.b.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long xperm_b(long rs1, long rs2)
{
  return __builtin_riscv_xperm_b(rs1, rs2);
}

// RV64ZBP-LABEL: @xperm_h(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.h.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long xperm_h(long rs1, long rs2)
{
  return __builtin_riscv_xperm_h(rs1, rs2);
}

// RV64ZBP-LABEL: @xperm_w(
// RV64ZBP-NEXT:  entry:
// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.w.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBP-NEXT:    ret i64 [[TMP2]]
//
long xperm_w(long rs1, long rs2)
{
  return __builtin_riscv_xperm_w(rs1, rs2);
}