Guard CUDA-specific cuDNN usage in stream.h with GOOGLE_CUDA

Move the unconditional include of cuda/cuda_dnn.h behind a
`#if GOOGLE_CUDA` guard and wrap the three convolution-plan code paths
that dynamic_cast to gpu::CudnnSupport in the same guard, so that
non-CUDA builds no longer fail to compile stream.h. When GOOGLE_CUDA is
not defined, these paths fall through to the existing
port::UnimplementedError return.
diff --git a/tensorflow/stream_executor/stream.h b/tensorflow/stream_executor/stream.h
index 498fb11..3a99e10 100644
--- a/tensorflow/stream_executor/stream.h
+++ b/tensorflow/stream_executor/stream.h
@@ -29,7 +29,6 @@
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/stream_executor/blas.h"
-#include "tensorflow/stream_executor/cuda/cuda_dnn.h"
#include "tensorflow/stream_executor/device_memory.h"
#include "tensorflow/stream_executor/dnn.h"
#include "tensorflow/stream_executor/event.h"
@@ -42,6 +41,10 @@
#include "tensorflow/stream_executor/stream_executor_pimpl.h"
#include "tensorflow/stream_executor/temporary_memory_manager.h"
+#if GOOGLE_CUDA
+#include "tensorflow/stream_executor/cuda/cuda_dnn.h"
+#endif // GOOGLE_CUDA
+
namespace stream_executor {
namespace host {
@@ -363,6 +366,7 @@
DeviceMemory<OutputType> *output, ScratchAllocator *scratch_allocator,
const dnn::AlgorithmConfig &plan_config,
dnn::ProfileResult *output_profile_result) {
+#if GOOGLE_CUDA
dnn::DnnSupport *dnn = parent_->AsDnn();
if (dnn) {
gpu::CudnnSupport *cudnn_dnn = dynamic_cast<gpu::CudnnSupport*>(dnn);
@@ -373,6 +377,7 @@
output_descriptor, *output, convolution_descriptor, plan_config,
scratch_allocator, output_profile_result);
}
+#endif // GOOGLE_CUDA
return port::UnimplementedError("DNN library is not found.");
}
@@ -469,6 +474,7 @@
ScratchAllocator *scratch_allocator,
const dnn::AlgorithmConfig &plan_config,
dnn::ProfileResult *output_profile_result) {
+#if GOOGLE_CUDA
dnn::DnnSupport *dnn = parent_->AsDnn();
if (dnn) {
gpu::CudnnSupport *cudnn_dnn = dynamic_cast<gpu::CudnnSupport*>(dnn);
@@ -480,6 +486,7 @@
output_descriptor, backward_output_data, convolution_descriptor,
plan_config, scratch_allocator, output_profile_result);
}
+#endif // GOOGLE_CUDA
return port::UnimplementedError("DNN library is not found.");
}
@@ -559,6 +566,7 @@
ScratchAllocator *scratch_allocator,
const dnn::AlgorithmConfig &plan_config,
dnn::ProfileResult *output_profile_result) {
+#if GOOGLE_CUDA
dnn::DnnSupport *dnn = parent_->AsDnn();
if (dnn) {
gpu::CudnnSupport *cudnn_dnn = dynamic_cast<gpu::CudnnSupport*>(dnn);
@@ -570,6 +578,7 @@
output_descriptor, backward_output_data, convolution_descriptor,
plan_config, scratch_allocator, output_profile_result);
}
+#endif // GOOGLE_CUDA
return port::UnimplementedError("DNN library is not found.");
}