#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/leaky_relu_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
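
// Forward kernel: elementwise LeakyReLU, Y[i] = X[i] when X[i] >= 0, alpha * X[i] otherwise.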
template <typename T>
__global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = X[i] >= 0 ? X[i] : X[i] * alpha;
  }
}
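
// Backward kernel: pass the upstream gradient dY through where X >= 0, scale it by alpha elsewhere.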
template <typename T>
__global__ void LeakyReluGradientKernel(
    const int N,
    const T alpha,
    const T* X,
    const T* dY,
    T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    dX[i] = X[i] >= 0 ? dY[i] : dY[i] * alpha;
  }
}
} // namespace
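
// Forward pass, specialized for float on CUDA: one kernel launch over all elements of X.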
template <>
bool LeakyReluOp<float, CUDAContext>::RunOnDevice() {
  const auto& X = Input(0);
  CAFFE_ENFORCE_GT(X.size(), 0);
  auto* Y = Output(0);
  Y->ResizeLike(X);
  LeakyReluKernel<<<
      CAFFE_GET_BLOCKS(X.size()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.size(), alpha_, X.data<float>(), Y->mutable_data<float>());
  return true;
}
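
// Backward pass, specialized for float on CUDA: computes dX from the forward input X and the
// incoming gradient dY.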
template <>
bool LeakyReluGradientOp<float, CUDAContext>::RunOnDevice() {
  const auto& X = Input(0);
  const auto& dY = Input(1);
  auto* dX = Output(0);
  CAFFE_ENFORCE_EQ(X.size(), dY.size());
  dX->ResizeLike(X);
  LeakyReluGradientKernel<<<
      CAFFE_GET_BLOCKS(X.size()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.size(),
      alpha_,
      X.data<float>(),
      dY.data<float>(),
      dX->mutable_data<float>());
  return true;
}
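
// Minimal usage sketch (not part of this file): building a LeakyRelu OperatorDef with the
// proto_utils helpers. The alpha value of 0.01f here is illustrative, not mandated by this op.
//
//   OperatorDef def = CreateOperatorDef(
//       "LeakyRelu", "", {"X"}, {"Y"}, {MakeArgument<float>("alpha", 0.01f)});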
namespace {
REGISTER_CUDA_OPERATOR(LeakyRelu, LeakyReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
LeakyReluGradient,
LeakyReluGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2