Fix remaining CUDA >= 300 checks

In atomicReduce(), the "#else / assert(0 && ...)" fallback for pre-sm_30
devices was nested inside the sizeof(T) == 4 branch, leaving the
sizeof(T) == 8 and error branches outside the #if __CUDA_ARCH__ >= 300
guard; move it after the whole if/else chain so the guard covers the
entire function body. Also add the missing guard to
InnerReductionKernel, which previously had none.
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index fa7364f..65638b6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -41,9 +41,6 @@
         return;
       }
     }
-#else
-  assert(0 && "Shouldn't be called on unsupported device");
-#endif
   }
   else if (sizeof(T) == 8) {
     unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
@@ -65,6 +62,9 @@
   else {
     assert(0 && "Wordsize not supported");
   }
+#else
+  assert(0 && "Shouldn't be called on unsupported device");
+#endif
 }
 
 // We extend atomicExch to support extra data types
@@ -373,6 +373,7 @@
           typename Reducer, typename Index>
 __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
                                      typename Self::CoeffReturnType* output) {
+#if __CUDA_ARCH__ >= 300
   typedef typename Self::CoeffReturnType Type;
   eigen_assert(blockDim.y == 1);
   eigen_assert(blockDim.z == 1);
@@ -433,6 +434,9 @@
       }
     }
   }
+#else
+  assert(0 && "Shouldn't be called on unsupported device");
+#endif
 }
 
 #ifdef EIGEN_HAS_CUDA_FP16
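
For context, a minimal standalone sketch of the guard pattern this patch
applies (hypothetical kernel, not Eigen code): the warp-shuffle reduction
is compiled only when __CUDA_ARCH__ >= 300, since __shfl_down requires
compute capability 3.0, and builds targeting older devices fall through
to an assert, exactly as atomicReduce and InnerReductionKernel now do.

    #include <cassert>

    // Hypothetical kernel illustrating the guard pattern: code relying
    // on sm_30+ warp shuffles is wrapped in #if __CUDA_ARCH__ >= 300,
    // with an assert for any device pass targeting an older arch.
    __global__ void warpSumKernel(const float* in, float* out) {
    #if __CUDA_ARCH__ >= 300
      float val = in[threadIdx.x];
      // Tree reduction within one warp; __shfl_down needs sm_30+
      // (CUDA 9 and later spell this __shfl_down_sync).
      for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val += __shfl_down(val, offset);
      }
      if (threadIdx.x == 0) {
        *out = val;  // lane 0 now holds the warp-wide sum
      }
    #else
      assert(0 && "Shouldn't be called on unsupported device");
    #endif
    }

Launched as warpSumKernel<<<1, 32>>>(d_in, d_out), this sums 32 floats.
Note that __CUDA_ARCH__ is undefined during the host pass, so the host
compiler also sees the #else branch; that is harmless here because the
guarded code only ever runs on the device.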