Use softer error reporting instead of CHECK*

  - CHECK(x) checks whether condition x holds and LOG(FATAL) if not, which
    will result in calling abort().
  - This change uses NN_OPS_CHECK, which logs the failing condition and
    returns false to the runtime, allowing graceful failures.

Bug: 63905942
Test: NeuralNetworkTests pass
Change-Id: I8b1217f777638f974c91fa429449e39d37218af6
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index ae7c827..f38fcc1 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -66,47 +66,56 @@
 }
 
 
-void QuantizeMultiplierSmallerThanOne(double double_multiplier,
+// Macro to check if the input parameters for operation are valid or not.
+#define NN_OPS_CHECK(v)                                                  \
+    if (!(v)) {                                                          \
+        LOG(ERROR) << "NN_OPS_CHECK failed: '" << #v << "'\n";           \
+        return false;                                                    \
+    }
+
+bool QuantizeMultiplierSmallerThanOne(double double_multiplier,
                                       int32_t* quantized_multiplier,
                                       int32_t* right_shift) {
-    CHECK(double_multiplier >= 0.);
-    CHECK(double_multiplier < 1.);
+    NN_OPS_CHECK(double_multiplier >= 0.);
+    NN_OPS_CHECK(double_multiplier < 1.);
     if (double_multiplier == 0.) {
         *quantized_multiplier = 0;
         *right_shift = 0;
-        return;
+        return true;
     }
-    CHECK(double_multiplier > 0.);
+    NN_OPS_CHECK(double_multiplier > 0.);
     const double q = std::frexp(double_multiplier, right_shift);
     *right_shift *= -1;
     int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
-    CHECK(q_fixed <= (1ll << 31));
+    NN_OPS_CHECK(q_fixed <= (1ll << 31));
     if (q_fixed == (1ll << 31)) {
         q_fixed /= 2;
         --*right_shift;
     }
-    CHECK_GE(*right_shift, 0);
-    CHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
+    NN_OPS_CHECK(*right_shift >= 0);
+    NN_OPS_CHECK(q_fixed <= std::numeric_limits<int32_t>::max());
     *quantized_multiplier = static_cast<int32_t>(q_fixed);
+    return true;
 }
 
-void QuantizeMultiplierGreaterThanOne(double double_multiplier,
+bool QuantizeMultiplierGreaterThanOne(double double_multiplier,
                                       int32_t* quantized_multiplier,
                                       int* left_shift) {
-    CHECK(double_multiplier > 1.);
+    NN_OPS_CHECK(double_multiplier > 1.);
     const double q = std::frexp(double_multiplier, left_shift);
     int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
-    CHECK(q_fixed <= (1ll << 31));
+    NN_OPS_CHECK(q_fixed <= (1ll << 31));
     if (q_fixed == (1ll << 31)) {
         q_fixed /= 2;
         ++*left_shift;
     }
-    CHECK_GE(*left_shift, 0);
-    CHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
+    NN_OPS_CHECK(*left_shift >= 0);
+    NN_OPS_CHECK(q_fixed <= std::numeric_limits<int32_t>::max());
     *quantized_multiplier = static_cast<int32_t>(q_fixed);
+    return true;
 }
 
-void GetQuantizedConvolutionMultipler(const Shape& inputShape,
+bool GetQuantizedConvolutionMultipler(const Shape& inputShape,
                                       const Shape& filterShape,
                                       const Shape& biasShape,
                                       const Shape& outputShape,
@@ -116,11 +125,12 @@
     const float output_scale = outputShape.scale;
 
     // The following conditions must be guaranteed by the training pipeline.
-    CHECK(std::abs(input_product_scale - bias_scale) <=
+    NN_OPS_CHECK(std::abs(input_product_scale - bias_scale) <=
               1e-6 * std::min(input_product_scale, bias_scale));
-    CHECK(input_product_scale >= 0);
-    CHECK(input_product_scale < output_scale);
+    NN_OPS_CHECK(input_product_scale >= 0);
+    NN_OPS_CHECK(input_product_scale < output_scale);
     *multiplier = input_product_scale / output_scale;
+    return true;
 }
 
 void CalculateActivationRangeUint8(int32_t activation,
@@ -162,16 +172,8 @@
     return static_cast<int32_t>(std::floor(max_input_rescaled));
 }
 
-
-// Macro to check if the input parameters for operation are valid or not.
-#define nnOpsCheck(v)                                                                      \
-    if (!(v)) {                                                                            \
-        LOG(ERROR) << "nnOpsCheck failed: "  << #v << "'\n";                               \
-        return false;                                                                      \
-    }
-
 bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out) {
-    nnOpsCheck(getNumberOfDimensions(in1) <= 4 && getNumberOfDimensions(in2) <= 4);
+    NN_OPS_CHECK(getNumberOfDimensions(in1) <= 4 && getNumberOfDimensions(in2) <= 4);
     if (SameShape(in1, in2)) {
         return SetShape(in1, out);
     } else {
@@ -224,14 +226,14 @@
                  int32_t padding_top, int32_t padding_bottom,
                  int32_t stride_width, int32_t stride_height,
                  Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
-    nnOpsCheck(getNumberOfDimensions(filter) == 4);
-    nnOpsCheck(getNumberOfDimensions(bias) == 1);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(getNumberOfDimensions(filter) == 4);
+    NN_OPS_CHECK(getNumberOfDimensions(bias) == 1);
 
-    nnOpsCheck(getSizeOfDimension(filter, 0) == getSizeOfDimension(bias, 0));
-    nnOpsCheck(getSizeOfDimension(filter, 3) == getSizeOfDimension(input, 3));
+    NN_OPS_CHECK(getSizeOfDimension(filter, 0) == getSizeOfDimension(bias, 0));
+    NN_OPS_CHECK(getSizeOfDimension(filter, 3) == getSizeOfDimension(input, 3));
 
-    nnOpsCheck(stride_width == stride_height);
+    NN_OPS_CHECK(stride_width == stride_height);
 
     uint32_t channels_out = getSizeOfDimension(filter, 0);
     uint32_t width        = getSizeOfDimension(input, 2);
@@ -257,13 +259,13 @@
                           int32_t padding_top, int32_t padding_bottom,
                           int32_t stride_width, int32_t stride_height,
                           Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
-    nnOpsCheck(getNumberOfDimensions(filter) == 4);
-    nnOpsCheck(getNumberOfDimensions(bias) == 1);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(getNumberOfDimensions(filter) == 4);
+    NN_OPS_CHECK(getNumberOfDimensions(bias) == 1);
 
-    nnOpsCheck(getSizeOfDimension(filter, 3) == getSizeOfDimension(bias, 0));
+    NN_OPS_CHECK(getSizeOfDimension(filter, 3) == getSizeOfDimension(bias, 0));
 
-    nnOpsCheck(stride_width == stride_height);
+    NN_OPS_CHECK(stride_width == stride_height);
 
     uint32_t channels_out = getSizeOfDimension(filter, 3);
     uint32_t width        = getSizeOfDimension(input, 2);
@@ -289,8 +291,8 @@
                            int32_t stride_width, int32_t stride_height,
                            int32_t filter_width, int32_t filter_height,
                            Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
-    nnOpsCheck(stride_width == stride_height);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(stride_width == stride_height);
 
     uint32_t batches      = getSizeOfDimension(input, 0);
     uint32_t width        = getSizeOfDimension(input, 2);
@@ -310,7 +312,7 @@
 
 bool genericActivationPrepare(const Shape& input,
                               Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) <= 4);
+    NN_OPS_CHECK(getNumberOfDimensions(input) <= 4);
     return SetShape(input, output);
 }
 
@@ -324,9 +326,9 @@
     uint32_t num_units  = getSizeOfDimension(weights, 0);
     uint32_t batch_size = input_size / getSizeOfDimension(weights, 1);
 
-    nnOpsCheck(getSizeOfDimension(bias, 0) == num_units);
-    nnOpsCheck(getSizeOfDimension(weights, 1) * batch_size == input_size);
-    nnOpsCheck(getNumberOfDimensions(weights) == 2);
+    NN_OPS_CHECK(getSizeOfDimension(bias, 0) == num_units);
+    NN_OPS_CHECK(getSizeOfDimension(weights, 1) * batch_size == input_size);
+    NN_OPS_CHECK(getNumberOfDimensions(weights) == 2);
 
     output->type = input.type;
     output->dimensions = {batch_size, num_units};
@@ -342,22 +344,22 @@
     OperandType input_type = inputShapes[0].type;
     uint32_t num_dimensions = getNumberOfDimensions(inputShapes[0]);
 
-    nnOpsCheck(axis >= 0);
-    nnOpsCheck(axis < (int32_t)num_dimensions);
+    NN_OPS_CHECK(axis >= 0);
+    NN_OPS_CHECK(axis < (int32_t)num_dimensions);
 
     int sum_axis = getSizeOfDimension(inputShapes[0], axis);
     for (int i = 1; i < num_inputs; ++i) {
-        nnOpsCheck(getNumberOfDimensions(inputShapes[i]) == num_dimensions);
-        nnOpsCheck(inputShapes[i].type == inputShapes[0].type);
+        NN_OPS_CHECK(getNumberOfDimensions(inputShapes[i]) == num_dimensions);
+        NN_OPS_CHECK(inputShapes[i].type == inputShapes[0].type);
         if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
-            nnOpsCheck(inputShapes[0].offset == inputShapes[i].offset);
-            nnOpsCheck(inputShapes[0].scale == inputShapes[i].scale);
+            NN_OPS_CHECK(inputShapes[0].offset == inputShapes[i].offset);
+            NN_OPS_CHECK(inputShapes[0].scale == inputShapes[i].scale);
         }
         for (int d = 0; d < (int32_t)num_dimensions; ++d) {
             if (d == axis) {
                 sum_axis += getSizeOfDimension(inputShapes[i], axis);
             } else {
-                nnOpsCheck(getSizeOfDimension(inputShapes[0], d) ==
+                NN_OPS_CHECK(getSizeOfDimension(inputShapes[0], d) ==
                            getSizeOfDimension(inputShapes[i], d));
             }
         }
@@ -368,8 +370,8 @@
     output->dimensions[axis] = sum_axis;
 
     if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
-        nnOpsCheck(inputShapes[0].offset == output->offset);
-        nnOpsCheck(inputShapes[0].scale == output->scale);
+        NN_OPS_CHECK(inputShapes[0].offset == output->offset);
+        NN_OPS_CHECK(inputShapes[0].scale == output->scale);
     }
 
     return true;
@@ -377,7 +379,7 @@
 
 
 bool genericNormalizationPrepare(const Shape& input, Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
     return SetShape(input, output);
 }
 
@@ -397,7 +399,7 @@
     for (int32_t i = 0; i < targetDimsSize; ++i) {
         int32_t value = targetDims[i];
         if (value == -1) {
-            nnOpsCheck(strechDim == -1);
+            NN_OPS_CHECK(strechDim == -1);
             strechDim = i;
         } else {
             numOutputElements *= value;
@@ -410,7 +412,7 @@
         numOutputElements *= strechValue;
     }
 
-    nnOpsCheck(numInputElements == numOutputElements);
+    NN_OPS_CHECK(numInputElements == numOutputElements);
 
     output->type = input.type;
     output->dimensions = outDims;
@@ -424,7 +426,7 @@
                            int32_t width,
                            int32_t height,
                            Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
     uint32_t batches  = getSizeOfDimension(input, 0);
     uint32_t channels = getSizeOfDimension(input, 3);
 
@@ -437,15 +439,15 @@
 bool depthToSpacePrepare(const Shape& input,
                          int32_t blockSize,
                          Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
-    nnOpsCheck(blockSize > 0);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(blockSize > 0);
 
     uint32_t batches  = getSizeOfDimension(input, 0);
     uint32_t height   = getSizeOfDimension(input, 1);
     uint32_t width    = getSizeOfDimension(input, 2);
     uint32_t channels = getSizeOfDimension(input, 3);
 
-    nnOpsCheck(channels % (blockSize * blockSize) == 0);
+    NN_OPS_CHECK(channels % (blockSize * blockSize) == 0);
     output->type = input.type;
     output->dimensions = {batches,
                           height * blockSize,
@@ -460,16 +462,16 @@
 bool spaceToDepthPrepare(const Shape& input,
                          int32_t blockSize,
                          Shape* output) {
-    nnOpsCheck(getNumberOfDimensions(input) == 4);
-    nnOpsCheck(blockSize > 0);
+    NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
+    NN_OPS_CHECK(blockSize > 0);
 
     uint32_t batches  = getSizeOfDimension(input, 0);
     uint32_t height   = getSizeOfDimension(input, 1);
     uint32_t width    = getSizeOfDimension(input, 2);
     uint32_t channels = getSizeOfDimension(input, 3);
 
-    nnOpsCheck(height % blockSize == 0);
-    nnOpsCheck(width % blockSize == 0);
+    NN_OPS_CHECK(height % blockSize == 0);
+    NN_OPS_CHECK(width % blockSize == 0);
 
     output->type = input.type;
     output->dimensions = {batches,
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h
index 88783af..d13e084 100644
--- a/common/include/OperationsUtils.h
+++ b/common/include/OperationsUtils.h
@@ -78,15 +78,18 @@
     return (imageSize - filterSize + stride + paddingHead + paddingTail) / stride;
 }
 
-void QuantizeMultiplierSmallerThanOne(double double_multiplier,
+__wur
+bool QuantizeMultiplierSmallerThanOne(double double_multiplier,
                                       int32_t* quantized_multiplier,
                                       int32_t* right_shift);
 
-void QuantizeMultiplierGreaterThanOne(double double_multiplier,
+__wur
+bool QuantizeMultiplierGreaterThanOne(double double_multiplier,
                                       int32_t* quantized_multiplier,
                                       int* left_shift);
 
-void GetQuantizedConvolutionMultipler(const Shape& inputShape,
+__wur
+bool GetQuantizedConvolutionMultipler(const Shape& inputShape,
                                       const Shape& filterShape,
                                       const Shape& biasShape,
                                       const Shape& outputShape,
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index c649c22..11f54bb 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -136,9 +136,11 @@
 
     int32_t input_multiplier = 0;
     int32_t input_left_shift = 0;
-    QuantizeMultiplierGreaterThanOne(input_real_multiplier,
-                                     &input_multiplier,
-                                     &input_left_shift);
+    if (!QuantizeMultiplierGreaterThanOne(input_real_multiplier,
+                                          &input_multiplier,
+                                          &input_left_shift)) {
+        return false;
+    }
     int32_t input_range_radius =
             CalculateInputRadius(kInputIntegerBits, input_left_shift);
 
@@ -180,9 +182,11 @@
 
     int32_t input_multiplier = 0;
     int32_t input_left_shift = 0;
-    QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
-                                     &input_multiplier,
-                                     &input_left_shift);
+    if (!QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
+                                          &input_multiplier,
+                                          &input_left_shift)) {
+        return false;
+    }
     float diff_min = -1.0f * CalculateInputRadius(kScaledDiffIntegerBits,
                                                   input_left_shift);
 
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index cbdc4f8..149ac8e 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -106,10 +106,12 @@
     int32_t output_activation_min = 0;
     int32_t output_activation_max = 0;
 
-    GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape,
-                                     outputShape, &real_multiplier);
-    QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier,
-                                     &output_shift);
+    if (!GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape,
+                                          outputShape, &real_multiplier) ||
+            !QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier,
+                                              &output_shift)) {
+        return false;
+    }
     CalculateActivationRangeUint8(activation, outputShape,
                                   &output_activation_min,
                                   &output_activation_max);
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 9f43d56..8c4e58c 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -77,10 +77,13 @@
     int32_t output_activation_min = 0;
     int32_t output_activation_max = 0;
 
-    GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape,
-                                     outputShape, &real_multiplier);
-    QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier,
-                                     &output_shift);
+
+    if (!GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape,
+                                          outputShape, &real_multiplier) ||
+            !QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier,
+                                              &output_shift)) {
+        return false;
+    }
     CalculateActivationRangeUint8(activation, outputShape,
                                   &output_activation_min,
                                   &output_activation_max);
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index 0cfc9c9..7f4aa6b 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -57,10 +57,12 @@
     int32_t output_activation_min = 0;
     int32_t output_activation_max = 0;
 
-    GetQuantizedConvolutionMultipler(inputShape, weightsShape, biasShape,
-                                     outputShape, &real_multiplier);
-    QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier,
-                                     &output_shift);
+    if (!GetQuantizedConvolutionMultipler(inputShape, weightsShape, biasShape,
+                                          outputShape, &real_multiplier) ||
+            !QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier,
+                                              &output_shift)) {
+        return false;
+    }
     CalculateActivationRangeUint8(activation, outputShape,
                                   &output_activation_min,
                                   &output_activation_max);