// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zbc -emit-llvm %s -o - \
// RUN: | FileCheck %s -check-prefix=RV64ZBC
#include <stdint.h>
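// Test that the Zbc carry-less multiply builtins lower to the corresponding
// llvm.riscv.clmul* intrinsics. The alloca/load/store scaffolding in the
// checks is expected: %clang_cc1 emits unoptimized IR by default.

// clmul: low XLEN bits of the 2*XLEN-bit carry-less product.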
// RV64ZBC-LABEL: @clmul_64(
// RV64ZBC-NEXT: entry:
// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// RV64ZBC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// RV64ZBC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBC-NEXT: ret i64 [[TMP2]]
//
uint64_t clmul_64(uint64_t a, uint64_t b) {
return __builtin_riscv_clmul_64(a, b);
}
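// clmulh: high XLEN bits of the 2*XLEN-bit carry-less product.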
// RV64ZBC-LABEL: @clmulh_64(
// RV64ZBC-NEXT: entry:
// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// RV64ZBC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// RV64ZBC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBC-NEXT: ret i64 [[TMP2]]
//
uint64_t clmulh_64(uint64_t a, uint64_t b) {
return __builtin_riscv_clmulh_64(a, b);
}
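// clmulr: bits 2*XLEN-2 down to XLEN-1 of the carry-less product
// (the bit-reversed clmul).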
// RV64ZBC-LABEL: @clmulr_64(
// RV64ZBC-NEXT: entry:
// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// RV64ZBC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// RV64ZBC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
// RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[TMP0]], i64 [[TMP1]])
// RV64ZBC-NEXT: ret i64 [[TMP2]]
//
uint64_t clmulr_64(uint64_t a, uint64_t b) {
return __builtin_riscv_clmulr_64(a, b);
}
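// 32-bit variant on RV64: lowers to the i32 form of the intrinsic.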
// RV64ZBC-LABEL: @clmul_32(
// RV64ZBC-NEXT: entry:
// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// RV64ZBC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// RV64ZBC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// RV64ZBC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// RV64ZBC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// RV64ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV64ZBC-NEXT: ret i32 [[TMP2]]
//
uint32_t clmul_32(uint32_t a, uint32_t b) {
return __builtin_riscv_clmul_32(a, b);
}