Update hvxservice to use the correct getPadding implementation. am: ba9a2c7ebe am: d00708a634 am: 7e2e6e231c am: a9651a94af
am: d5584f9613

Change-Id: I6d2bc111cdf920fc35d72e78b07d1ce57f51a4b9
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..9b3f9d9
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+BasedOnStyle: Google
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+TabWidth: 4
+UseTab: Never
+IndentWidth: 4
diff --git a/1.0/Android.bp b/1.0/Android.bp
index d8d5c91..a57f169 100644
--- a/1.0/Android.bp
+++ b/1.0/Android.bp
@@ -44,6 +44,7 @@
         "liblog",
         "libutils",
         "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
     ],
diff --git a/1.0/Device.cpp b/1.0/Device.cpp
index 29437f8..264223b 100644
--- a/1.0/Device.cpp
+++ b/1.0/Device.cpp
@@ -17,13 +17,14 @@
 #define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"
 
 #include "Device.h"
+#include <android-base/logging.h>
+#include <memory>
+#include <mutex>
+#include <thread>
 #include "HexagonModel.h"
 #include "HexagonUtils.h"
 #include "PreparedModel.h"
-#include <android-base/logging.h>
-#include <mutex>
-#include <thread>
-#include <memory>
+#include "ValidateHal.h"
 
 namespace android {
 namespace hardware {
@@ -37,29 +38,31 @@
 
 static std::once_flag configure_nnlib;
 static void configureHexagon() {
-    std::call_once(configure_nnlib, [](){ hexagon::Controller::getInstance().config(); });
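+    // std::call_once guarantees config() and boost() run exactly once even if
+    // multiple binder threads race into this function; boost(100) appears to
+    // request the highest bus-usage level from the DSP.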
+    std::call_once(configure_nnlib, []() {
+        hexagon::Controller::getInstance().config();
+        hexagon::Controller::getInstance().boost(100);
+    });
 }
 
 Return<void> Device::getCapabilities(getCapabilities_cb _hidl_cb) {
     configureHexagon();
 
+    // These numbers are approximations for this release.
+    // TODO: Replace them with the actual measured numbers.
     PerformanceInfo float32Performance = {
-        .execTime   = 100.0f, // nanoseconds?
-        .powerUsage = 1.0f,   // picoJoules
+        .execTime = 30.0f, .powerUsage = 2.0f,
     };
 
     PerformanceInfo quantized8Performance = {
-        .execTime   = 100.0f, // nanoseconds?
-        .powerUsage = 1.0f,   // picoJoules
+        .execTime = 0.7f, .powerUsage = 0.7f,
     };
 
     Capabilities capabilities = {
-        .float32Performance    = float32Performance,
-        .quantized8Performance = quantized8Performance,
+        .float32Performance = float32Performance, .quantized8Performance = quantized8Performance,
     };
 
     ErrorStatus status =
-            hexagon::isHexagonAvailable() ? ErrorStatus::NONE : ErrorStatus::DEVICE_UNAVAILABLE;
+        hexagon::isHexagonAvailable() ? ErrorStatus::NONE : ErrorStatus::DEVICE_UNAVAILABLE;
 
     _hidl_cb(status, capabilities);
     return Void();
@@ -85,14 +88,17 @@
     return Void();
 }
 
-void Device::asyncPrepare(const Model& model, const sp<IPreparedModelCallback>& callback) {
+static void asyncPrepare(const Model& model, const sp<IPreparedModelCallback>& callback) {
     std::shared_ptr<hexagon::Model> hexagonModel = std::make_shared<hexagon::Model>(model);
 
-    if (hexagonModel->compile()) {
-        callback->notify(ErrorStatus::NONE, new PreparedModel(model, hexagonModel));
+    Return<void> ret;
+    if (hexagonModel->prepare()) {
+        ret = callback->notify(ErrorStatus::NONE, new PreparedModel(model, hexagonModel));
+    } else {
+        ret = callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
     }
-    else {
-        callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
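+    // notify() returns a Return<void> carrying HIDL transport status; a dead
+    // or misbehaving client surfaces here, so log it rather than letting the
+    // failure vanish silently.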
+    if (!ret.isOk()) {
+        LOG(ERROR) << "Error in callback's return type: " << ret.description();
     }
 }
 
@@ -113,9 +119,9 @@
         return ErrorStatus::DEVICE_UNAVAILABLE;
     }
 
-    // This thread is intentionally detached because the sample driver service
-    // is expected to live forever.
-    std::thread([this, model, callback]{ asyncPrepare(model, callback); }).detach();
+    // TODO: Once the nnlib hanging issue is resolved, make this function
+    // asynchronous again.
+    asyncPrepare(model, callback);
 
     return ErrorStatus::NONE;
 }
@@ -123,7 +129,7 @@
 Return<DeviceStatus> Device::getStatus() {
     configureHexagon();
     mCurrentStatus =
-            hexagon::isHexagonAvailable() ? DeviceStatus::AVAILABLE : DeviceStatus::OFFLINE;
+        hexagon::isHexagonAvailable() ? DeviceStatus::AVAILABLE : DeviceStatus::OFFLINE;
     return mCurrentStatus;
 }
 
diff --git a/1.0/Device.h b/1.0/Device.h
index e953f81..cae055c 100644
--- a/1.0/Device.h
+++ b/1.0/Device.h
@@ -30,13 +30,13 @@
 namespace V1_0 {
 namespace implementation {
 
+using ::android::sp;
 using ::android::hardware::hidl_array;
 using ::android::hardware::hidl_memory;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
-using ::android::sp;
 
 struct Device : public IDevice {
     Device();
@@ -50,9 +50,7 @@
                                      const sp<IPreparedModelCallback>& callback) override;
     Return<DeviceStatus> getStatus() override;
 
-private:
-    void asyncPrepare(const Model& model, const sp<IPreparedModelCallback>& callback);
-
+   private:
     DeviceStatus mCurrentStatus;
 };
 
diff --git a/1.0/HexagonController.cpp b/1.0/HexagonController.cpp
index be36f40..d2753ac 100644
--- a/1.0/HexagonController.cpp
+++ b/1.0/HexagonController.cpp
@@ -19,13 +19,45 @@
 #include "HexagonController.h"
 
 #define LOAD_HEXAGON_FUNCTION(name) \
-    mFn_##name = loadFunction<hexagon_nn_controller_##name##_fn>("hexagon_nn_controller_"#name);
+    mFn_##name = loadFunction<hexagon_nn_controller_##name##_fn>("hexagon_nn_controller_" #name);
 
-#define CONTROLLER_CHECK(function, ...)                         \
-    int err = mFn_##function(__VA_ARGS__);                      \
-    if (err != 0) {                                             \
-        return err;                                             \
-    }
+#define CLOSE_HEXAGON_FUNCTION(name) mFn_##name = nullptr;
+
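+// FOR_EACH_FUNCTION is an X-macro: expanding it with LOAD_HEXAGON_FUNCTION or
+// CLOSE_HEXAGON_FUNCTION stamps out one statement per controller entry point,
+// keeping the load and teardown lists in sync from a single definition.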
+#define FOR_EACH_FUNCTION(MACRO)   \
+    MACRO(init)                    \
+    MACRO(getlog)                  \
+    MACRO(snpprint)                \
+    MACRO(set_debug_level)         \
+    MACRO(prepare)                 \
+    MACRO(append_node)             \
+    MACRO(append_const_node)       \
+    MACRO(execute_new)             \
+    MACRO(execute)                 \
+    MACRO(teardown)                \
+    MACRO(get_perfinfo)            \
+    MACRO(reset_perfinfo)          \
+    MACRO(version)                 \
+    MACRO(last_execution_cycles)   \
+    MACRO(GetHexagonBinaryVersion) \
+    MACRO(PrintLog)                \
+    MACRO(op_name_to_id)           \
+    MACRO(op_id_to_name)           \
+    MACRO(disable_dcvs)            \
+    MACRO(set_powersave_level)     \
+    MACRO(config)                  \
+    MACRO(get_dsp_offset)          \
+    MACRO(boost)                   \
+    MACRO(slow)
+
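+// CONTROLLER_CHECK returns -1 when the symbol was never loaded; otherwise it
+// forwards the call and propagates any nonzero error code, so each wrapper
+// method below reduces to a single macro invocation.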
+#define CONTROLLER_CHECK(function, ...)    \
+    if (mFn_##function == nullptr) {       \
+        return -1;                         \
+    }                                      \
+    int err = mFn_##function(__VA_ARGS__); \
+    if (err != 0) {                        \
+        return err;                        \
+    }                                      \
+    return 0;
 
 namespace android {
 namespace hardware {
@@ -34,41 +66,36 @@
 namespace implementation {
 namespace hexagon {
 
+const char Controller::kFilename[] = "libhexagon_nn_controller.so";
+
 Controller::Controller() {
-    const char* filename = "libhexagon_nn_controller.so";
-
-    mHandle = dlopen(filename, RTLD_LAZY | RTLD_LOCAL);
-    if (mHandle == nullptr) {
-        LOG(ERROR) << "FAILED TO LOAD LIBRARY libhexagon_nn_controller: " << dlerror();
-    }
-
-    LOAD_HEXAGON_FUNCTION(init)
-    LOAD_HEXAGON_FUNCTION(getlog)
-    LOAD_HEXAGON_FUNCTION(snpprint)
-    LOAD_HEXAGON_FUNCTION(set_debug_level)
-    LOAD_HEXAGON_FUNCTION(prepare)
-    LOAD_HEXAGON_FUNCTION(append_node)
-    LOAD_HEXAGON_FUNCTION(append_const_node)
-    LOAD_HEXAGON_FUNCTION(execute_new)
-    LOAD_HEXAGON_FUNCTION(execute)
-    LOAD_HEXAGON_FUNCTION(teardown)
-    LOAD_HEXAGON_FUNCTION(get_perfinfo)
-    LOAD_HEXAGON_FUNCTION(reset_perfinfo)
-    LOAD_HEXAGON_FUNCTION(version)
-    LOAD_HEXAGON_FUNCTION(last_execution_cycles)
-    LOAD_HEXAGON_FUNCTION(GetHexagonBinaryVersion)
-    LOAD_HEXAGON_FUNCTION(PrintLog)
-    LOAD_HEXAGON_FUNCTION(op_name_to_id)
-    LOAD_HEXAGON_FUNCTION(op_id_to_name)
-    LOAD_HEXAGON_FUNCTION(disable_dcvs)
-    LOAD_HEXAGON_FUNCTION(set_powersave_level)
-    LOAD_HEXAGON_FUNCTION(config)
+    openNnlib();
 }
 
 Controller::~Controller() {
+    closeNnlib();
+}
+
+bool Controller::openNnlib() {
+    mHandle = dlopen(kFilename, RTLD_LAZY | RTLD_LOCAL);
+    HEXAGON_SOFT_ASSERT_NE(mHandle, 0,
+                           "FAILED TO LOAD LIBRARY " /* << kFilename << ": " << dlerror()*/);
+    FOR_EACH_FUNCTION(LOAD_HEXAGON_FUNCTION)
+    return true;
+}
+
+bool Controller::closeNnlib() {
+    FOR_EACH_FUNCTION(CLOSE_HEXAGON_FUNCTION)
     if (mHandle != nullptr) {
-        dlclose(mHandle);
+        int err = dlclose(mHandle);
+        mHandle = nullptr;
+        HEXAGON_SOFT_ASSERT_EQ(err, 0, "FAILED TO CLOSE LIBRARY " << kFilename);
     }
+    return true;
+}
+
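+// Closing and reopening the library gives callers a recovery path, presumably
+// for the nnlib hang referenced by the TODO in Device::prepareModel.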
+bool Controller::resetNnlib() {
+    return closeNnlib() && openNnlib();
 }
 
 Controller& Controller::getInstance() {
@@ -76,259 +103,120 @@
     return instance;
 }
 
-hexagon_nn_nn_id Controller::init() {
-    if (mFn_init == nullptr) {
-        return hexagon_nn_nn_id{};
-    }
-
-    hexagon_nn_nn_id id = mFn_init();
-
-    return id;
+int Controller::init(hexagon_nn_nn_id* g) {
+    CONTROLLER_CHECK(init, g);
 }
 
-int Controller::getlog(hexagon_nn_nn_id id, unsigned char *buf, uint32_t length) {
-    if (mFn_getlog == nullptr) {
-        return -1;
-    }
-
+int Controller::getlog(hexagon_nn_nn_id id, unsigned char* buf, uint32_t length) {
     CONTROLLER_CHECK(getlog, id, buf, length);
-
-    return 0;
 }
 
-int Controller::snpprint(hexagon_nn_nn_id id, unsigned char *buf, uint32_t length) {
-    if (mFn_snpprint == nullptr) {
-        return -1;
-    }
-
+int Controller::snpprint(hexagon_nn_nn_id id, unsigned char* buf, uint32_t length) {
     CONTROLLER_CHECK(snpprint, id, buf, length);
-
-    return 0;
 }
 
 int Controller::set_debug_level(hexagon_nn_nn_id id, int level) {
-    if (mFn_set_debug_level == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(set_debug_level, id, level);
-
-    return 0;
 }
 
 int Controller::prepare(hexagon_nn_nn_id id) {
-    if (mFn_prepare == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(prepare, id);
-
-    return 0;
 }
 
-int Controller::append_node(hexagon_nn_nn_id id,
-                         uint32_t node_id,
-                         op_type operation,
-                         hexagon_nn_padding_type padding,
-                         const hexagon_nn_input *inputs,
-                         uint32_t num_inputs,
-                         const hexagon_nn_output *outputs,
-                         uint32_t num_outputs) {
-    if (mFn_append_node == nullptr) {
-        return -1;
-    }
-
-    CONTROLLER_CHECK(append_node, id, node_id, operation, padding, inputs, num_inputs,
-                     outputs, num_outputs);
-
-    return 0;
+int Controller::append_node(hexagon_nn_nn_id id, uint32_t node_id, op_type operation,
+                            hexagon_nn_padding_type padding, const hexagon_nn_input* inputs,
+                            uint32_t num_inputs, const hexagon_nn_output* outputs,
+                            uint32_t num_outputs) {
+    CONTROLLER_CHECK(append_node, id, node_id, operation, padding, inputs, num_inputs, outputs,
+                     num_outputs);
 }
 
-int Controller::append_const_node(hexagon_nn_nn_id id,
-                               uint32_t node_id,
-                               uint32_t batches,
-                               uint32_t height,
-                               uint32_t width,
-                               uint32_t depth,
-                               const uint8_t *data,
-                               uint32_t data_len) {
-    if (mFn_append_const_node == nullptr) {
-        return -1;
-    }
-
-    CONTROLLER_CHECK(append_const_node, id, node_id, batches, height, width, depth,
-                     data, data_len);
-
-    return 0;
+int Controller::append_const_node(hexagon_nn_nn_id id, uint32_t node_id, uint32_t batches,
+                                  uint32_t height, uint32_t width, uint32_t depth,
+                                  const uint8_t* data, uint32_t data_len) {
+    CONTROLLER_CHECK(append_const_node, id, node_id, batches, height, width, depth, data, data_len);
 }
 
-int Controller::execute_new(hexagon_nn_nn_id id,
-                         const hexagon_nn_tensordef *inputs,
-                         uint32_t n_inputs,
-                         hexagon_nn_tensordef *outputs,
-                         uint32_t n_outputs) {
-    if (mFn_execute_new == nullptr) {
-        return -1;
-    }
-
+int Controller::execute_new(hexagon_nn_nn_id id, const hexagon_nn_tensordef* inputs,
+                            uint32_t n_inputs, hexagon_nn_tensordef* outputs, uint32_t n_outputs) {
     CONTROLLER_CHECK(execute_new, id, inputs, n_inputs, outputs, n_outputs);
-
-    return 0;
 }
 
-int Controller::execute(hexagon_nn_nn_id id,
-                     uint32_t batches_in,
-                     uint32_t height_in,
-                     uint32_t width_in,
-                     uint32_t depth_in,
-                     const uint8_t *data_in,
-                     uint32_t data_len_in,
-                     uint32_t *batches_out,
-                     uint32_t *height_out,
-                     uint32_t *width_out,
-                     uint32_t *depth_out,
-                     uint8_t *data_out,
-                     uint32_t data_out_max,
-                     uint32_t *data_out_size) {
-    if (mFn_execute == nullptr) {
-        return -1;
-    }
-
+int Controller::execute(hexagon_nn_nn_id id, uint32_t batches_in, uint32_t height_in,
+                        uint32_t width_in, uint32_t depth_in, const uint8_t* data_in,
+                        uint32_t data_len_in, uint32_t* batches_out, uint32_t* height_out,
+                        uint32_t* width_out, uint32_t* depth_out, uint8_t* data_out,
+                        uint32_t data_out_max, uint32_t* data_out_size) {
     CONTROLLER_CHECK(execute, id, batches_in, height_in, width_in, depth_in, data_in, data_len_in,
                      batches_out, height_out, width_out, depth_out, data_out, data_out_max,
                      data_out_size);
-
-    return 0;
 }
 
 int Controller::teardown(hexagon_nn_nn_id id) {
-    if (mFn_teardown == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(teardown, id);
-
-    return 0;
 }
 
-int Controller::get_perfinfo(hexagon_nn_nn_id id,
-                          hexagon_nn_perfinfo *info_out,
-                          unsigned int info_out_len,
-                          unsigned int *n_items_out) {
-    if (mFn_get_perfinfo == nullptr) {
-        return -1;
-    }
-
+int Controller::get_perfinfo(hexagon_nn_nn_id id, hexagon_nn_perfinfo* info_out,
+                             unsigned int info_out_len, unsigned int* n_items_out) {
     CONTROLLER_CHECK(get_perfinfo, id, info_out, info_out_len, n_items_out);
-
-    return 0;
 }
 
 int Controller::reset_perfinfo(hexagon_nn_nn_id id, uint32_t event) {
-    if (mFn_reset_perfinfo == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(reset_perfinfo, id, event);
-
-    return 0;
 }
 
-int Controller::version(int *ver) {
-    if (mFn_version == nullptr) {
-        return -1;
-    }
-
+int Controller::version(int* ver) {
     CONTROLLER_CHECK(version, ver);
-
-    return 0;
 }
 
-int Controller::last_execution_cycles(hexagon_nn_nn_id id,
-                                   unsigned int *cycles_lo,
-                                   unsigned int *cycles_hi) {
-    if (mFn_last_execution_cycles == nullptr) {
-        return -1;
-    }
-
+int Controller::last_execution_cycles(hexagon_nn_nn_id id, unsigned int* cycles_lo,
+                                      unsigned int* cycles_hi) {
     CONTROLLER_CHECK(last_execution_cycles, id, cycles_lo, cycles_hi);
-
-    return 0;
 }
 
-int Controller::GetHexagonBinaryVersion(int *ver) {
-    if (mFn_GetHexagonBinaryVersion == nullptr) {
-        return -1;
-    }
-
+int Controller::GetHexagonBinaryVersion(int* ver) {
     CONTROLLER_CHECK(GetHexagonBinaryVersion, ver);
-
-    return 0;
 }
 
-int Controller::PrintLog(const uint8_t *data_in, unsigned int data_in_len) {
-    if (mFn_PrintLog == nullptr) {
-        return -1;
-    }
-
+int Controller::PrintLog(const uint8_t* data_in, unsigned int data_in_len) {
     CONTROLLER_CHECK(PrintLog, data_in, data_in_len);
-
-    return 0;
 }
 
-int Controller::op_name_to_id(const char *name, unsigned int *id) {
-    if (mFn_op_name_to_id == nullptr) {
-        return -1;
-    }
-
+int Controller::op_name_to_id(const char* name, unsigned int* id) {
     CONTROLLER_CHECK(op_name_to_id, name, id);
-
-    return 0;
 }
 
-int Controller::op_id_to_name(const unsigned int id, char *name, int name_len) {
-    if (mFn_op_id_to_name == nullptr) {
-        return -1;
-    }
-
+int Controller::op_id_to_name(const unsigned int id, char* name, int name_len) {
     CONTROLLER_CHECK(op_id_to_name, id, name, name_len);
-
-    return 0;
 }
 
 int Controller::disable_dcvs() {
-    if (mFn_disable_dcvs == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(disable_dcvs);
-
-    return 0;
 }
 
 int Controller::set_powersave_level(unsigned int level) {
-    if (mFn_set_powersave_level == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(set_powersave_level, level);
-
-    return 0;
 }
 
 int Controller::config() {
-    if (mFn_config == nullptr) {
-        return -1;
-    }
-
     CONTROLLER_CHECK(config);
-
-    return 0;
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
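+// Note: get_dsp_offset expands CONTROLLER_CHECK, so a nonzero offset is
+// returned through the macro's error path and a zero offset maps to the
+// "success" return of 0.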
+unsigned int Controller::get_dsp_offset() {
+    CONTROLLER_CHECK(get_dsp_offset);
+}
+
+int Controller::boost(int bus_usage) {
+    CONTROLLER_CHECK(boost, bus_usage);
+}
+
+int Controller::slow() {
+    CONTROLLER_CHECK(slow);
+}
+
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/1.0/HexagonController.h b/1.0/HexagonController.h
index 04775ec..ce7e8e7 100644
--- a/1.0/HexagonController.h
+++ b/1.0/HexagonController.h
@@ -17,10 +17,10 @@
 #ifndef ANDROID_HARDWARE_V1_0_HEXAGON_CONTROLLER_H
 #define ANDROID_HARDWARE_V1_0_HEXAGON_CONTROLLER_H
 
+#include <android-base/logging.h>
 #include "HexagonUtils.h"
 #include "dlfcn.h"
 #include "hexagon_nn_controller/hexagon_nn_controller.h"
-#include <android-base/logging.h>
 
 namespace android {
 namespace hardware {
@@ -31,17 +31,17 @@
 
 // interface wrapper
 class Controller {
-
-// methods
-private:
+    // methods
+   private:
     Controller();
     ~Controller();
-    Controller(const Controller&)            = delete;
-    Controller(Controller&&)                 = delete;
+    Controller(const Controller&) = delete;
+    Controller(Controller&&) = delete;
     Controller& operator=(const Controller&) = delete;
-    Controller& operator=(Controller&&)      = delete;
+    Controller& operator=(Controller&&) = delete;
 
-    template<typename Function> Function loadFunction(const char* name) {
+    template <typename Function>
+    Function loadFunction(const char* name) {
         void* fn = dlsym(mHandle, name);
         if (fn == nullptr) {
             LOG(ERROR) << "loadFunction -- failed to load function " << name;
@@ -49,80 +49,58 @@
         return reinterpret_cast<Function>(fn);
     }
 
-public:
+    bool openNnlib();
+    bool closeNnlib();
+
+   public:
     static Controller& getInstance();
+    bool resetNnlib();
 
-    hexagon_nn_nn_id init();
+    int init(hexagon_nn_nn_id* g);
 
-    int getlog(hexagon_nn_nn_id id, unsigned char *buf, uint32_t length);
+    int getlog(hexagon_nn_nn_id id, unsigned char* buf, uint32_t length);
 
-    int snpprint(hexagon_nn_nn_id id, unsigned char *buf, uint32_t length);
+    int snpprint(hexagon_nn_nn_id id, unsigned char* buf, uint32_t length);
 
     int set_debug_level(hexagon_nn_nn_id id, int level);
 
     int prepare(hexagon_nn_nn_id id);
 
-    int append_node(hexagon_nn_nn_id id,
-                    uint32_t node_id,
-                    op_type operation,
-                    hexagon_nn_padding_type padding,
-                    const hexagon_nn_input *inputs,
-                    uint32_t num_inputs,
-                    const hexagon_nn_output *outputs,
-                    uint32_t num_outputs);
+    int append_node(hexagon_nn_nn_id id, uint32_t node_id, op_type operation,
+                    hexagon_nn_padding_type padding, const hexagon_nn_input* inputs,
+                    uint32_t num_inputs, const hexagon_nn_output* outputs, uint32_t num_outputs);
 
-    int append_const_node(hexagon_nn_nn_id id,
-                          uint32_t node_id,
-                          uint32_t batches,
-                          uint32_t height,
-                          uint32_t width,
-                          uint32_t depth,
-                          const uint8_t *data,
-                          uint32_t data_len);
+    int append_const_node(hexagon_nn_nn_id id, uint32_t node_id, uint32_t batches, uint32_t height,
+                          uint32_t width, uint32_t depth, const uint8_t* data, uint32_t data_len);
 
-    int execute_new(hexagon_nn_nn_id id,
-                    const hexagon_nn_tensordef *inputs,
-                    uint32_t n_inputs,
-                    hexagon_nn_tensordef *outputs,
-                    uint32_t n_outputs);
+    int execute_new(hexagon_nn_nn_id id, const hexagon_nn_tensordef* inputs, uint32_t n_inputs,
+                    hexagon_nn_tensordef* outputs, uint32_t n_outputs);
 
-    int execute(hexagon_nn_nn_id id,
-                uint32_t batches_in,
-                uint32_t height_in,
-                uint32_t width_in,
-                uint32_t depth_in,
-                const uint8_t *data_in,
-                uint32_t data_len_in,
-                uint32_t *batches_out,
-                uint32_t *height_out,
-                uint32_t *width_out,
-                uint32_t *depth_out,
-                uint8_t *data_out,
-                uint32_t data_out_max,
-                uint32_t *data_out_size);
+    int execute(hexagon_nn_nn_id id, uint32_t batches_in, uint32_t height_in, uint32_t width_in,
+                uint32_t depth_in, const uint8_t* data_in, uint32_t data_len_in,
+                uint32_t* batches_out, uint32_t* height_out, uint32_t* width_out,
+                uint32_t* depth_out, uint8_t* data_out, uint32_t data_out_max,
+                uint32_t* data_out_size);
 
     int teardown(hexagon_nn_nn_id id);
 
-    int get_perfinfo(hexagon_nn_nn_id id,
-                     hexagon_nn_perfinfo *info_out,
-                     unsigned int info_out_len,
-                     unsigned int *n_items_out);
+    int get_perfinfo(hexagon_nn_nn_id id, hexagon_nn_perfinfo* info_out, unsigned int info_out_len,
+                     unsigned int* n_items_out);
 
     int reset_perfinfo(hexagon_nn_nn_id id, uint32_t event);
 
-    int version(int *ver);
+    int version(int* ver);
 
-    int last_execution_cycles(hexagon_nn_nn_id id,
-                              unsigned int *cycles_lo,
-                              unsigned int *cycles_hi);
+    int last_execution_cycles(hexagon_nn_nn_id id, unsigned int* cycles_lo,
+                              unsigned int* cycles_hi);
 
-    int GetHexagonBinaryVersion(int *ver);
+    int GetHexagonBinaryVersion(int* ver);
 
-    int PrintLog(const uint8_t *data_in, unsigned int data_in_len);
+    int PrintLog(const uint8_t* data_in, unsigned int data_in_len);
 
-    int op_name_to_id(const char *name, unsigned int *id);
+    int op_name_to_id(const char* name, unsigned int* id);
 
-    int op_id_to_name(const unsigned int id, char *name, int name_len);
+    int op_id_to_name(const unsigned int id, char* name, int name_len);
 
     int disable_dcvs();
 
@@ -130,38 +108,47 @@
 
     int config();
 
-// members
-private:
-    void*                                         mHandle;
-    hexagon_nn_controller_init_fn                    mFn_init;
-    hexagon_nn_controller_getlog_fn                  mFn_getlog;
-    hexagon_nn_controller_snpprint_fn                mFn_snpprint;
-    hexagon_nn_controller_set_debug_level_fn         mFn_set_debug_level;
-    hexagon_nn_controller_prepare_fn                 mFn_prepare;
-    hexagon_nn_controller_append_node_fn             mFn_append_node;
-    hexagon_nn_controller_append_const_node_fn       mFn_append_const_node;
-    hexagon_nn_controller_execute_new_fn             mFn_execute_new;
-    hexagon_nn_controller_execute_fn                 mFn_execute;
-    hexagon_nn_controller_teardown_fn                mFn_teardown;
-    hexagon_nn_controller_get_perfinfo_fn            mFn_get_perfinfo;
-    hexagon_nn_controller_reset_perfinfo_fn          mFn_reset_perfinfo;
-    hexagon_nn_controller_version_fn                 mFn_version;
-    hexagon_nn_controller_last_execution_cycles_fn   mFn_last_execution_cycles;
-    hexagon_nn_controller_GetHexagonBinaryVersion_fn mFn_GetHexagonBinaryVersion;
-    hexagon_nn_controller_PrintLog_fn                mFn_PrintLog;
-    hexagon_nn_controller_op_name_to_id_fn           mFn_op_name_to_id;
-    hexagon_nn_controller_op_id_to_name_fn           mFn_op_id_to_name;
-    hexagon_nn_controller_disable_dcvs_fn            mFn_disable_dcvs;
-    hexagon_nn_controller_set_powersave_level_fn     mFn_set_powersave_level;
-    hexagon_nn_controller_config_fn                  mFn_config;
+    unsigned int get_dsp_offset();
 
+    int boost(int bus_usage);
+
+    int slow();
+
+    // members
+   private:
+    static const char kFilename[];
+    void* mHandle;
+    hexagon_nn_controller_init_fn mFn_init;
+    hexagon_nn_controller_getlog_fn mFn_getlog;
+    hexagon_nn_controller_snpprint_fn mFn_snpprint;
+    hexagon_nn_controller_set_debug_level_fn mFn_set_debug_level;
+    hexagon_nn_controller_prepare_fn mFn_prepare;
+    hexagon_nn_controller_append_node_fn mFn_append_node;
+    hexagon_nn_controller_append_const_node_fn mFn_append_const_node;
+    hexagon_nn_controller_execute_new_fn mFn_execute_new;
+    hexagon_nn_controller_execute_fn mFn_execute;
+    hexagon_nn_controller_teardown_fn mFn_teardown;
+    hexagon_nn_controller_get_perfinfo_fn mFn_get_perfinfo;
+    hexagon_nn_controller_reset_perfinfo_fn mFn_reset_perfinfo;
+    hexagon_nn_controller_version_fn mFn_version;
+    hexagon_nn_controller_last_execution_cycles_fn mFn_last_execution_cycles;
+    hexagon_nn_controller_GetHexagonBinaryVersion_fn mFn_GetHexagonBinaryVersion;
+    hexagon_nn_controller_PrintLog_fn mFn_PrintLog;
+    hexagon_nn_controller_op_name_to_id_fn mFn_op_name_to_id;
+    hexagon_nn_controller_op_id_to_name_fn mFn_op_id_to_name;
+    hexagon_nn_controller_disable_dcvs_fn mFn_disable_dcvs;
+    hexagon_nn_controller_set_powersave_level_fn mFn_set_powersave_level;
+    hexagon_nn_controller_config_fn mFn_config;
+    hexagon_nn_controller_get_dsp_offset_fn mFn_get_dsp_offset;
+    hexagon_nn_controller_boost_fn mFn_boost;
+    hexagon_nn_controller_slow_fn mFn_slow;
 };
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
 
-#endif // ANDROID_HARDWARE_V1_0_HEXAGON_CONTROLLER_H
+#endif  // ANDROID_HARDWARE_V1_0_HEXAGON_CONTROLLER_H
diff --git a/1.0/HexagonModel.cpp b/1.0/HexagonModel.cpp
index 6e7e773..859d5fd 100644
--- a/1.0/HexagonModel.cpp
+++ b/1.0/HexagonModel.cpp
@@ -17,9 +17,9 @@
 #define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"
 
 #include "HexagonModel.h"
-#include "HexagonOperations.h"
 #include <numeric>
 #include <unordered_set>
+#include "HexagonOperations.h"
 
 namespace android {
 namespace hardware {
@@ -34,29 +34,26 @@
     for (size_t i = 0; i < model.operands.size(); ++i) {
         const Operand& operand = model.operands[i];
         info[i] = {
-            .type       = operand.type,
+            .type = operand.type,
             .dimensions = operand.dimensions,
-            .scale      = operand.scale,
-            .zeroPoint  = operand.zeroPoint,
-            .lifetime   = operand.lifetime,
-            .buffer     = const_cast<uint8_t*>(getData(operand, model.operandValues, pools)),
-            .length     = operand.location.length,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = operand.lifetime,
+            .buffer = const_cast<uint8_t*>(getData(operand, model.operandValues, pools)),
+            .length = operand.location.length,
         };
     }
     return info;
 }
 
-Model::Model(const NeuralnetworksModel& model) : mNodeCount(0), mCompiled(false) {
-    mGraphId = hexagon::Controller::getInstance().init();
-    hexagon::Controller::getInstance().set_debug_level(mGraphId, 99);
-
+Model::Model(const NeuralnetworksModel& model) : mGraphId(0), mNodeCount(0), mCompiled(false) {
     mPools = mapPools(model.pools);
     mOperands = getOperandsInfo(model, mPools);
     std::for_each(mPools.begin(), mPools.end(), [](RunTimePoolInfo& mem) { mem.update(); });
 
     mOperations = model.operations;
-    mInputs     = model.inputIndexes;
-    mOutputs    = model.outputIndexes;
+    mInputs = model.inputIndexes;
+    mOutputs = model.outputIndexes;
 }
 
 Model::Model(Model&& other) {
@@ -64,41 +61,42 @@
 }
 
 Model& Model::operator=(Model&& other) {
-    mNodeCount      = other.mNodeCount;
-    mGraphId        = other.mGraphId;
-    mCompiled       = other.mCompiled;
-    mOperands       = std::move(other.mOperands);
-    mOperations     = std::move(other.mOperations);
-    mInputs         = std::move(other.mInputs);
-    mOutputs        = std::move(other.mOutputs);
-    mPools          = std::move(other.mPools);
-    other.mGraphId  = {};
-    other.mCompiled = false;
+    if (this != &other) {
+        mNodeCount = other.mNodeCount;
+        mGraphId = other.mGraphId;
+        mCompiled = other.mCompiled;
+        mOperands = std::move(other.mOperands);
+        mOperations = std::move(other.mOperations);
+        mInputs = std::move(other.mInputs);
+        mOutputs = std::move(other.mOutputs);
+        mPools = std::move(other.mPools);
+        other.mNodeCount = 0;
+        other.mGraphId = {};
+        other.mCompiled = false;
+    }
     return *this;
 }
 
 Model::~Model() {
-    if (mGraphId != hexagon_nn_nn_id{}) {
-        hexagon::Controller::getInstance().teardown(mGraphId);
-    }
-}
-
-std::string Model::getDebugLog() {
-    char buffer[16*1024];
-    int err = hexagon::Controller::getInstance().getlog(
-            mGraphId, reinterpret_cast<uint8_t*>(buffer), sizeof(buffer));
-    HEXAGON_SOFT_ASSERT_EQ(0, err, "failed getDebugLog");
-    return buffer;
+    clearModel();
 }
 
 std::string Model::getLog() {
-    char buffer[16*1024];
-    int err = hexagon::Controller::getInstance().snpprint(
-            mGraphId, reinterpret_cast<uint8_t*>(buffer), sizeof(buffer));
+    char buffer[16 * 1024];
+    int err = hexagon::Controller::getInstance().getlog(
+        mGraphId, reinterpret_cast<uint8_t*>(buffer), sizeof(buffer));
     HEXAGON_SOFT_ASSERT_EQ(0, err, "failed getLog");
     return buffer;
 }
 
+std::string Model::getGraph() {
+    char buffer[16 * 1024];
+    int err = hexagon::Controller::getInstance().snpprint(
+        mGraphId, reinterpret_cast<uint8_t*>(buffer), sizeof(buffer));
+    HEXAGON_SOFT_ASSERT_EQ(0, err, "failed getGraph");
+    return buffer;
+}
+
 uint32_t Model::getNextNode() {
     return ++mNodeCount;
 }
@@ -109,34 +107,34 @@
 
 Shape Model::getShape(uint32_t operand) {
     return {
-        .type       = mOperands[operand].type,
+        .type = mOperands[operand].type,
         .dimensions = mOperands[operand].dimensions,
-        .scale      = mOperands[operand].scale,
-        .offset     = mOperands[operand].zeroPoint,
+        .scale = mOperands[operand].scale,
+        .offset = mOperands[operand].zeroPoint,
     };
 }
 
 bool Model::setShape(uint32_t operand, const Shape& shape) {
     const hexagon_nn_output& output = mOperands[operand].hexagon_output;
     HEXAGON_SOFT_ASSERT_EQ(output, hexagon_nn_output{}, "Output has already been set");
-    //mOperands[operand].type       = shape.type;
+    // mOperands[operand].type       = shape.type;
     mOperands[operand].dimensions = shape.dimensions;
-    //mOperands[operand].scale      = shape.scale;
-    //mOperands[operand].zeroPoint  = shape.offset;
+    // mOperands[operand].scale      = shape.scale;
+    // mOperands[operand].zeroPoint  = shape.offset;
     return true;
 }
 
 bool Model::isConstant(uint32_t operand) {
     OperandLifeTime lifetime = mOperands[operand].lifetime;
     return lifetime == OperandLifeTime::CONSTANT_COPY ||
-            lifetime == OperandLifeTime::CONSTANT_REFERENCE;
+           lifetime == OperandLifeTime::CONSTANT_REFERENCE;
 }
 
 hexagon_nn_input Model::createTensorInternal(uint32_t B, uint32_t H, uint32_t W, uint32_t D,
                                              const uint8_t* ptr, size_t size) {
     uint32_t node = getNextNode();
-    bool success = hexagon::Controller::getInstance().append_const_node(
-            mGraphId, node, B, H, W, D, ptr, size) == 0;
+    bool success = hexagon::Controller::getInstance().append_const_node(mGraphId, node, B, H, W, D,
+                                                                        ptr, size) == 0;
     HEXAGON_SOFT_ASSERT(success, "Failed to create tensor");
     return {.src_id = node, .output_idx = 0};
 }
@@ -150,8 +148,8 @@
     const OperandInfo& operand = mOperands[operandIndex];
     std::vector<uint32_t> dims = getAlignedDimensions(operand.dimensions, 4);
     HEXAGON_SOFT_ASSERT_NE(0ul, dims.size(), "Rank must be at most 4");
-    hexagon_nn_input result = createTensorInternal(dims[0], dims[1], dims[2], dims[3],
-                                                   operand.buffer, operand.length);
+    hexagon_nn_input result =
+        createTensorInternal(dims[0], dims[1], dims[2], dims[3], operand.buffer, operand.length);
     HEXAGON_SOFT_ASSERT_NE(hexagon_nn_input{}, result, "Failed to add operand");
     return result;
 }
@@ -168,7 +166,9 @@
     OperandInfo& operandInfo = mOperands[operand];
     if (operandInfo.hexagon_input_min == hexagon_nn_input{}) {
         float real_value =
-                (std::numeric_limits<uint8_t>::min() - operandInfo.zeroPoint) * operandInfo.scale;
+            operandInfo.type == OperandType::TENSOR_QUANT8_ASYMM
+                ? (std::numeric_limits<uint8_t>::min() - operandInfo.zeroPoint) * operandInfo.scale
+                : std::numeric_limits<uint32_t>::min() * operandInfo.scale;
         operandInfo.hexagon_input_min = createValues<float>({real_value});
     }
     return operandInfo.hexagon_input_min;
@@ -178,7 +178,9 @@
     OperandInfo& operandInfo = mOperands[operand];
     if (operandInfo.hexagon_input_max == hexagon_nn_input{}) {
         float real_value =
-                (std::numeric_limits<uint8_t>::max() - operandInfo.zeroPoint) * operandInfo.scale;
+            operandInfo.type == OperandType::TENSOR_QUANT8_ASYMM
+                ? (std::numeric_limits<uint8_t>::max() - operandInfo.zeroPoint) * operandInfo.scale
+                : std::numeric_limits<uint32_t>::max() * operandInfo.scale;
         operandInfo.hexagon_input_max = createValues<float>({real_value});
     }
     return operandInfo.hexagon_input_max;
@@ -189,7 +191,7 @@
     return hexagon::getPadding(padding);
 }
 
-hexagon_nn_input Model::createQuantizationValue(uint32_t operand, uint32_t quant_value) {
+hexagon_nn_input Model::createQuantizationValue(uint32_t operand, int32_t quant_value) {
     OperandInfo& operandInfo = mOperands[operand];
     float real_value = (quant_value - operandInfo.zeroPoint) * operandInfo.scale;
     return createValues<float>({real_value});
@@ -201,16 +203,19 @@
     HEXAGON_SOFT_ASSERT_NE(0ul, dims.size(), "Need at most 4 dimensions");
     // NHWC --> HWCN
     if (getShape(operand).type == OperandType::TENSOR_FLOAT32) {
-        std::vector<float> transposed = transpose<float>(dims[0], dims[1]*dims[2]*dims[3],
-                reinterpret_cast<const float*>(operandInfo.buffer));
+        std::vector<float> transposed =
+            transpose<float>(dims[0], dims[1] * dims[2] * dims[3],
+                             reinterpret_cast<const float*>(operandInfo.buffer));
         return createTensorInternal(dims[1], dims[2], dims[3], dims[0],
-                reinterpret_cast<const uint8_t*>(transposed.data()), operandInfo.length);
-    }
-    else {
-        std::vector<uint8_t> transposed = transpose<uint8_t>(dims[0], dims[1]*dims[2]*dims[3],
-                reinterpret_cast<const uint8_t*>(operandInfo.buffer));
+                                    reinterpret_cast<const uint8_t*>(transposed.data()),
+                                    operandInfo.length);
+    } else {
+        std::vector<uint8_t> transposed =
+            transpose<uint8_t>(dims[0], dims[1] * dims[2] * dims[3],
+                               reinterpret_cast<const uint8_t*>(operandInfo.buffer));
         return createTensorInternal(dims[1], dims[2], dims[3], dims[0],
-                reinterpret_cast<const uint8_t*>(transposed.data()), operandInfo.length);
+                                    reinterpret_cast<const uint8_t*>(transposed.data()),
+                                    operandInfo.length);
     }
 }
 
@@ -220,8 +225,7 @@
     HEXAGON_SOFT_ASSERT_NE(0ul, dims.size(), "Need at most 4 dimensions");
     // NHWC --> HWCN
     return createTensorInternal(dims[1], dims[2], dims[3] / depth_multiplier,
-                                dims[0] * depth_multiplier,
-                                operandInfo.buffer, operandInfo.length);
+                                dims[0] * depth_multiplier, operandInfo.buffer, operandInfo.length);
 }
 
 hexagon_nn_input Model::createFullyConnectedWeightTensor(uint32_t operand) {
@@ -232,16 +236,17 @@
     uint32_t num_units = dims[0] * dims[1] * dims[2];
     uint32_t input_size = dims[3];
     if (getShape(operand).type == OperandType::TENSOR_FLOAT32) {
-        std::vector<float> transposed = transpose<float>(num_units, input_size,
-                reinterpret_cast<const float*>(operandInfo.buffer));
+        std::vector<float> transposed = transpose<float>(
+            num_units, input_size, reinterpret_cast<const float*>(operandInfo.buffer));
         return createTensorInternal(1, 1, input_size, num_units,
-                reinterpret_cast<const uint8_t*>(transposed.data()), operandInfo.length);
-    }
-    else {
-        std::vector<uint8_t> transposed = transpose<uint8_t>(num_units, input_size,
-                reinterpret_cast<const uint8_t*>(operandInfo.buffer));
+                                    reinterpret_cast<const uint8_t*>(transposed.data()),
+                                    operandInfo.length);
+    } else {
+        std::vector<uint8_t> transposed = transpose<uint8_t>(
+            num_units, input_size, reinterpret_cast<const uint8_t*>(operandInfo.buffer));
         return createTensorInternal(1, 1, input_size, num_units,
-                reinterpret_cast<const uint8_t*>(transposed.data()), operandInfo.length);
+                                    reinterpret_cast<const uint8_t*>(transposed.data()),
+                                    operandInfo.length);
     }
 }
 
@@ -279,8 +284,11 @@
     HEXAGON_SOFT_ASSERT(verifyOperationOutputs(outputs),
                         "error adding operation: one or more outputs is invalid");
     uint32_t node = getNextNode();
-    return hexagon::Controller::getInstance().append_node(mGraphId, node, op, pad,
-            inputs.data(), inputs.size(), outputs.data(), outputs.size()) == 0 ? node : 0;
+    return hexagon::Controller::getInstance().append_node(mGraphId, node, op, pad, inputs.data(),
+                                                          inputs.size(), outputs.data(),
+                                                          outputs.size()) == 0
+               ? node
+               : 0;
 }
 
 std::vector<hexagon_nn_output> Model::getHexagonOutputs(const std::vector<uint32_t>& operands) {
@@ -370,7 +378,8 @@
     HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding base operation");
 
     std::vector<hexagon_nn_input> buffer_in = {{.src_id = node, .output_idx = 0},
-            {.src_id = node, .output_idx = 1}, {.src_id = node, .output_idx = 2}};
+                                               {.src_id = node, .output_idx = 1},
+                                               {.src_id = node, .output_idx = 2}};
     buffer_in.insert(buffer_in.end(), actArgs.begin(), actArgs.end());
     node = addOperationInternal(activation, NN_PAD_NA, buffer_in, outs);
     HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding activation operation");
@@ -378,10 +387,8 @@
     return registerHexagonInputs(outputs, node);
 }
 
-bool Model::addFusedFloatOperation(op_type op,
-                                   hexagon_nn_padding_type pad,
-                                   const hexagon_nn_input& bias,
-                                   op_type activation,
+bool Model::addFusedFloatOperation(op_type op, hexagon_nn_padding_type pad,
+                                   const hexagon_nn_input& bias, op_type activation,
                                    const std::vector<hexagon_nn_input>& inputs,
                                    const std::vector<uint32_t>& outputs) {
     HEXAGON_SOFT_ASSERT_EQ(1, outputs.size(), "addFusedFloatOperation requires 1 output");
@@ -398,18 +405,18 @@
         HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding bias operation");
     }
 
-    std::vector<hexagon_nn_input> buffer2_in = {{.src_id = node, .output_idx = 0}};
-    buffer2_in.insert(buffer2_in.end(), actArgs.begin(), actArgs.end());
-    node = addOperationInternal(activation, NN_PAD_NA, buffer2_in, outs);
-    HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding activation operation");
+    if (activation != OP_Nop) {
+        std::vector<hexagon_nn_input> buffer2_in = {{.src_id = node, .output_idx = 0}};
+        buffer2_in.insert(buffer2_in.end(), actArgs.begin(), actArgs.end());
+        node = addOperationInternal(activation, NN_PAD_NA, buffer2_in, outs);
+        HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding activation operation");
+    }
 
     return registerHexagonInputs(outputs, node);
 }
 
-bool Model::addFusedQuant8Operation(op_type op,
-                                    hexagon_nn_padding_type pad,
-                                    const hexagon_nn_input& bias,
-                                    op_type activation,
+bool Model::addFusedQuant8Operation(op_type op, hexagon_nn_padding_type pad,
+                                    const std::vector<hexagon_nn_input>& bias, op_type activation,
                                     const std::vector<hexagon_nn_input>& inputs,
                                     const std::vector<uint32_t>& outputs) {
     HEXAGON_SOFT_ASSERT_EQ(1, outputs.size(), "addFusedQuant8Operation requires 1 output");
@@ -418,10 +425,10 @@
     const hexagon_nn_input& new_max = getQuantizationMax(outputs[0]);
     uint32_t node;
 
-    hexagon_nn_output tensor_out8 = make_hexagon_nn_output(mOperands[outputs[0]].dimensions,
-                                                           sizeof(uint8_t));
-    hexagon_nn_output tensor_out32 = make_hexagon_nn_output(mOperands[outputs[0]].dimensions,
-                                                            sizeof(int32_t));
+    hexagon_nn_output tensor_out8 =
+        make_hexagon_nn_output(mOperands[outputs[0]].dimensions, sizeof(uint8_t));
+    hexagon_nn_output tensor_out32 =
+        make_hexagon_nn_output(mOperands[outputs[0]].dimensions, sizeof(int32_t));
     hexagon_nn_output scalar_out32 = make_hexagon_nn_output({1, 1, 1, 1}, sizeof(float));
 
     std::vector<hexagon_nn_output> out8 = {tensor_out8, scalar_out32, scalar_out32};
@@ -430,29 +437,36 @@
     // base operation
     node = addOperationInternal(op, pad, inputs, out32);
     HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding base operation");
-    const hexagon_nn_input old_min = {.src_id = node, .output_idx = 1};
-    const hexagon_nn_input old_max = {.src_id = node, .output_idx = 2};
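+    // 'previous' and its min/max track the outputs of whichever node ran last,
+    // so each optional stage below chains off the correct producer.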
+    hexagon_nn_input previous = {.src_id = node, .output_idx = 0};
+    hexagon_nn_input previous_min = {.src_id = node, .output_idx = 1};
+    hexagon_nn_input previous_max = {.src_id = node, .output_idx = 2};
 
     // add bias
-    if (bias != hexagon_nn_input{}) {
-        std::vector<hexagon_nn_input> buffer1_in = {{.src_id = node, .output_idx = 0}, bias,
-                                                    old_min, old_max, old_min, old_max};
-        node = addOperationInternal(OP_QuantizedBiasAdd_32p32to32, NN_PAD_NA, buffer1_in, out32);
+    if (bias.size() == 3) {
+        node = addOperationInternal(
+            OP_QuantizedBiasAdd_32p32to32, NN_PAD_NA,
+            {previous, bias[0], previous_min, previous_max, bias[1], bias[2]}, out32);
         HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding bias operation");
+        previous.src_id = node;
+        previous_min.src_id = node;
+        previous_max.src_id = node;
     }
 
     // requantize
-    const hexagon_nn_input buffer2_in = {.src_id = node, .output_idx = 0};
     node = addOperationInternal(OP_Requantize_32to8, NN_PAD_NA,
-                                {buffer2_in, old_min, old_max, new_min, new_max}, out8);
+                                {previous, previous_min, previous_max, new_min, new_max}, out8);
     HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding requantize operation");
+    previous.src_id = node;
+    previous_min.src_id = node;
+    previous_max.src_id = node;
 
     // activation
-    std::vector<hexagon_nn_input> buffer3 = {{.src_id = node, .output_idx = 0},
-            {.src_id = node, .output_idx = 1}, {.src_id = node, .output_idx = 2}};
-    buffer3.insert(buffer3.end(), actArgs.begin(), actArgs.end());
-    node = addOperationInternal(activation, NN_PAD_NA, buffer3, out8);
-    HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding activation operation");
+    if (activation != OP_Nop) {
+        std::vector<hexagon_nn_input> buffer = {previous, previous_min, previous_max};
+        buffer.insert(buffer.end(), actArgs.begin(), actArgs.end());
+        node = addOperationInternal(activation, NN_PAD_NA, buffer, out8);
+        HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding activation operation");
+    }
 
     return registerHexagonInputs(outputs, node);
 }
@@ -497,13 +511,18 @@
 bool Model::addOperations() {
     for (const Operation& operation : mOperations) {
         OperationType operationType = operation.type;
+
+        // For now, the operand type used to look up this operation is taken
+        // from its first input operand. If that assumption changes, this code
+        // will need to be updated.
         OperandType operandType = mOperands[operation.inputs[0]].type;
+
         OperationTuple opTuple = std::make_pair(operationType, operandType);
-        HEXAGON_SOFT_ASSERT(getOperationPrepareTable().find(opTuple) !=
-                            getOperationPrepareTable().end(),
-                            "Operation not found");
-        bool success = getOperationPrepareTable()[opTuple](
-                operation.inputs, operation.outputs, this);
+        HEXAGON_SOFT_ASSERT(
+            getOperationPrepareTable().find(opTuple) != getOperationPrepareTable().end(),
+            "Operation not found");
+        bool success =
+            getOperationPrepareTable()[opTuple](operation.inputs, operation.outputs, this);
         HEXAGON_SOFT_ASSERT(success, "error adding operation");
     }
     return true;
@@ -511,12 +530,30 @@
 
 bool Model::addOutputs() {
     // prepare OP_OUTPUT's inputs
-    std::vector<hexagon_nn_input> ins(mOutputs.size());
-    for (size_t i = 0; i < mOutputs.size(); ++i) {
-        OperandInfo& operand = mOperands[mOutputs[i]];
+    std::vector<hexagon_nn_input> ins;
+    for (size_t out : mOutputs) {
+        OperandInfo& operand = mOperands[out];
         HEXAGON_SOFT_ASSERT_NE(operand.hexagon_input, hexagon_nn_input{},
                                "output operand has not been registered");
-        ins[i] = operand.hexagon_input;
+
+        if (operand.type == OperandType::TENSOR_QUANT8_ASYMM) {
+            // Adjust quantized range of outputs
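+            // Dequantize to float, then re-quantize against the range this
+            // operand declares (quantized values 0 and 255 mapped through its
+            // scale and zero point), not whatever range the graph produced.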
+            uint32_t dequant = addOperationInternal(
+                OP_Dequantize, NN_PAD_NA,
+                {operand.hexagon_input, operand.hexagon_input_min, operand.hexagon_input_max},
+                {make_hexagon_nn_output(operand.dimensions, sizeof(float))});
+            uint32_t quant =
+                addOperationInternal(OP_Quantize, NN_PAD_NA,
+                                     {{.src_id = dequant, .output_idx = 0},
+                                      createQuantizationValue(out, 0),
+                                      createQuantizationValue(out, 255)},
+                                     {make_hexagon_nn_output(operand.dimensions, sizeof(uint8_t)),
+                                      make_hexagon_nn_output({1, 1, 1, 1}, sizeof(float)),
+                                      make_hexagon_nn_output({1, 1, 1, 1}, sizeof(float))});
+            ins.push_back({.src_id = quant, .output_idx = 0});
+        } else {
+            ins.push_back(operand.hexagon_input);
+        }
     }
 
     // add single output node for entire graph
@@ -526,45 +563,61 @@
     return true;
 }
 
-void Model::resetModel() {
+void Model::clearModel() {
     mCompiled = false;
     for (OperandInfo& operand : mOperands) {
         operand.hexagon_input = {};
+        operand.hexagon_input_min = {};
+        operand.hexagon_input_max = {};
         operand.hexagon_output = {};
     }
     if (mGraphId != hexagon_nn_nn_id{}) {
         hexagon::Controller::getInstance().teardown(mGraphId);
     }
-    mGraphId = hexagon::Controller::getInstance().init();
-    hexagon::Controller::getInstance().set_debug_level(mGraphId, 99);
 }
 
 std::vector<bool> Model::supportedOperations() {
     std::vector<bool> supported(mOperations.size());
     for (size_t i = 0; i < supported.size(); ++i) {
         const Operation& operation = mOperations[i];
-        auto entry = getOperationCheckTable().find(operation.type);
+        OperationType operationType = operation.type;
+
+        // For now, the operand type used to look up this operation is taken
+        // from its first input operand. If that assumption changes, this code
+        // will need to be updated.
+        OperandType operandType = mOperands[operation.inputs[0]].type;
+
+        OperationTuple opTuple = std::make_pair(operationType, operandType);
+
+        auto entry = getOperationCheckTable().find(opTuple);
         if (entry != getOperationCheckTable().end()) {
             supported[i] = entry->second(operation.inputs, operation.outputs, this);
-        }
-        else {
+        } else {
             supported[i] = false;
         }
     }
     return supported;
 }
 
-bool Model::compile() {
+bool Model::prepare() {
     if (!verifyOperations() || !verifyOperands()) {
         return false;
     }
 
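+    // The graph is allocated only after the model validates, so an invalid
+    // model never allocates DSP resources that would then need teardown.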
+    int err = hexagon::Controller::getInstance().init(&mGraphId);
+    HEXAGON_SOFT_ASSERT_EQ(0, err, "Hexagon could not allocate new graph");
+    HEXAGON_SOFT_ASSERT_NE(0, mGraphId, "Hexagon could not allocate new graph");
+    hexagon::Controller::getInstance().set_debug_level(mGraphId, 0);
+
     if (!addInputs() || !addOperations() || !addOutputs()) {
-        resetModel();
+        clearModel();
+        LOG(ERROR) << "Something went wrong. Clearing the model and aborting.";
         return false;
     }
 
-    int err = hexagon::Controller::getInstance().prepare(mGraphId);
+    err = hexagon::Controller::getInstance().prepare(mGraphId);
+
+    LOG(INFO) << "PrepareModel was " << (err == 0 ? "SUCCESSFUL" : "UNSUCCESSFUL");
 
     return err == 0;
 }
@@ -572,14 +625,14 @@
 static hexagon_nn_tensordef convertToTensordef(const OperandInfo& operand) {
     std::vector<uint32_t> dimensions = getAlignedDimensions(operand.dimensions, 4);
     return {
-        .batches        = dimensions[0],
-        .height         = dimensions[1],
-        .width          = dimensions[2],
-        .depth          = dimensions[3],
-        .data           = operand.buffer,
-        .dataLen        = static_cast<int32_t>(operand.length),
-        .data_valid_len = operand.length, // unused?
-        .unused         = 0,
+        .batches = dimensions[0],
+        .height = dimensions[1],
+        .width = dimensions[2],
+        .depth = dimensions[3],
+        .data = operand.buffer,
+        .dataLen = static_cast<int32_t>(operand.length),
+        .data_valid_len = operand.length,  // unused?
+        .unused = 0,
     };
 }
 
@@ -600,7 +653,7 @@
         newInfo.dimensions = inputOutput.dimensions;
     }
 
-    newInfo.buffer = pool.buffer + offset;
+    newInfo.buffer = pool.getBuffer() + offset;
     newInfo.length = getSize(newInfo);
 
     return newInfo;
@@ -626,18 +679,19 @@
     }
 
     // execute model
-    int err = hexagon::Controller::getInstance().execute_new(mGraphId, inputs.data(),
-                                                             inputs.size(), outputs.data(),
-                                                             outputs.size());
+    int err = hexagon::Controller::getInstance().execute_new(mGraphId, inputs.data(), inputs.size(),
+                                                             outputs.data(), outputs.size());
 
     std::for_each(pools.begin(), pools.end(), [](RunTimePoolInfo& pool) { pool.update(); });
 
+    LOG(INFO) << "EXECUTION WAS " << (err == 0 ? "SUCCESSFUL" : "UNSUCCESSFUL");
+
     return err == 0;
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/1.0/HexagonModel.h b/1.0/HexagonModel.h
index 500c074..797620b 100644
--- a/1.0/HexagonModel.h
+++ b/1.0/HexagonModel.h
@@ -17,16 +17,16 @@
 #ifndef ANDROID_HARDWARE_V1_0_HEXAGON_MODEL_H
 #define ANDROID_HARDWARE_V1_0_HEXAGON_MODEL_H
 
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <atomic>
+#include <string>
+#include <vector>
 #include "CpuExecutor.h"
 #include "HexagonController.h"
 #include "HexagonOperations.h"
 #include "HexagonUtils.h"
 #include "OperationsUtils.h"
 #include "hexagon_nn_controller/hexagon_nn_controller.h"
-#include <android/hardware/neuralnetworks/1.0/types.h>
-#include <atomic>
-#include <string>
-#include <vector>
 
 namespace android {
 namespace hardware {
@@ -58,20 +58,18 @@
     uint32_t length;
 
     // Hexagon nnlib identifiers
-    hexagon_nn_input  hexagon_input;
-    hexagon_nn_input  hexagon_input_min;
-    hexagon_nn_input  hexagon_input_max;
+    hexagon_nn_input hexagon_input;
+    hexagon_nn_input hexagon_input_min;
+    hexagon_nn_input hexagon_input_max;
     hexagon_nn_output hexagon_output;
 };
 
-
 // interface wrapper
 class Model {
-public:
-
-// methods
-    Model()                        = delete;
-    Model(const Model&)            = delete;
+   public:
+    // methods
+    Model() = delete;
+    Model(const Model&) = delete;
     Model& operator=(const Model&) = delete;
     Model(Model&& other);
     Model& operator=(Model&& other);
@@ -79,8 +77,8 @@
     Model(const NeuralnetworksModel& model);
     ~Model();
 
-    std::string getDebugLog();
     std::string getLog();
+    std::string getGraph();
 
     // model check
     const int32_t* getPointer(uint32_t operand);
@@ -92,20 +90,24 @@
     const hexagon_nn_input& getTensor(uint32_t operand);
     const hexagon_nn_input& getQuantizationMin(uint32_t operand);
     const hexagon_nn_input& getQuantizationMax(uint32_t operand);
-    hexagon_nn_input createQuantizationValue(uint32_t operand, uint32_t quant_value);
+    hexagon_nn_input createQuantizationValue(uint32_t operand, int32_t quant_value);
     hexagon_nn_input createConvFilterTensor(uint32_t operand);
     hexagon_nn_input createDepthwiseFilterTensor(uint32_t operand, int32_t depth_multiplier);
     hexagon_nn_input createFullyConnectedWeightTensor(uint32_t operand);
-    template<typename Type> Type getScalar(uint32_t operand);
+    template <typename Type>
+    Type getScalar(uint32_t operand);
     op_type getFloatActivation(uint32_t operand);
     op_type getQuantizedActivation(uint32_t operand);
     hexagon_nn_padding_type getPadding(uint32_t operand);
 
-    template<typename Type> hexagon_nn_input createTensor(
-            uint32_t B, uint32_t H, uint32_t W, uint32_t D, const std::vector<Type>& values);
+    template <typename Type>
+    hexagon_nn_input createTensor(uint32_t B, uint32_t H, uint32_t W, uint32_t D,
+                                  const std::vector<Type>& values);
     hexagon_nn_input createShape(uint32_t B, uint32_t H, uint32_t W, uint32_t D);
-    template<typename Type> hexagon_nn_input createValues(const std::vector<Type>& values);
-    template<typename Type> hexagon_nn_input createScalar(Type value);
+    template <typename Type>
+    hexagon_nn_input createValues(const std::vector<Type>& values);
+    template <typename Type>
+    hexagon_nn_input createScalar(Type value);
 
     // model prepare operations
     bool addBasicOperation(op_type op, hexagon_nn_padding_type pad,
@@ -119,25 +121,20 @@
                                           op_type activation,
                                           const std::vector<hexagon_nn_input>& inputs,
                                           const std::vector<uint32_t>& outputs);
-    bool addFusedFloatOperation(op_type op,
-                                hexagon_nn_padding_type pad,
-                                const hexagon_nn_input& bias,
-                                op_type activation,
+    bool addFusedFloatOperation(op_type op, hexagon_nn_padding_type pad,
+                                const hexagon_nn_input& bias, op_type activation,
                                 const std::vector<hexagon_nn_input>& inputs,
                                 const std::vector<uint32_t>& outputs);
-    bool addFusedQuant8Operation(op_type op,
-                                 hexagon_nn_padding_type pad,
-                                 const hexagon_nn_input& bias,
-                                 op_type activation,
+    bool addFusedQuant8Operation(op_type op, hexagon_nn_padding_type pad,
+                                 const std::vector<hexagon_nn_input>& bias, op_type activation,
                                  const std::vector<hexagon_nn_input>& inputs,
                                  const std::vector<uint32_t>& outputs);
 
     std::vector<bool> supportedOperations();
-    bool compile();
+    bool prepare();
     bool execute(const Request& request);
 
-private:
-
+   private:
     uint32_t getNextNode();
     uint32_t addOperationInternal(op_type op, hexagon_nn_padding_type pad,
                                   const std::vector<hexagon_nn_input>& inputs,
@@ -155,49 +152,48 @@
     bool addOperations();
     bool addOutputs();
 
-    void resetModel();
+    void clearModel();
 
-// members
-    hexagon_nn_nn_id             mGraphId;
-    uint32_t                     mNodeCount;
-    bool                         mCompiled;
-    std::vector<OperandInfo>     mOperands;
-    std::vector<Operation>       mOperations;
-    std::vector<uint32_t>        mInputs;
-    std::vector<uint32_t>        mOutputs;
+    // members
+    hexagon_nn_nn_id mGraphId;
+    uint32_t mNodeCount;
+    bool mCompiled;
+    std::vector<OperandInfo> mOperands;
+    std::vector<Operation> mOperations;
+    std::vector<uint32_t> mInputs;
+    std::vector<uint32_t> mOutputs;
     std::vector<RunTimePoolInfo> mPools;
 };
 
-
 // template implementations
 
-template<typename Type>
+template <typename Type>
 Type Model::getScalar(uint32_t operand) {
     return *reinterpret_cast<const Type*>(mOperands[operand].buffer);
 }
 
-template<typename Type>
+template <typename Type>
 hexagon_nn_input Model::createTensor(uint32_t B, uint32_t H, uint32_t W, uint32_t D,
                                      const std::vector<Type>& values) {
     return createTensorInternal(B, H, W, D, reinterpret_cast<const uint8_t*>(values.data()),
                                 values.size() * sizeof(Type));
 }
 
-template<typename Type>
+template <typename Type>
 hexagon_nn_input Model::createValues(const std::vector<Type>& values) {
     return createTensor(1, 1, 1, values.size(), values);
 }
 
-template<typename Type>
+template <typename Type>
 hexagon_nn_input Model::createScalar(Type value) {
     return createTensorInternal(1, 1, 1, 1, reinterpret_cast<uint8_t*>(&value), sizeof(Type));
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
 
-#endif // ANDROID_HARDWARE_V1_0_HEXAGON_MODEL_H
+#endif  // ANDROID_HARDWARE_V1_0_HEXAGON_MODEL_H
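
To make the template helpers declared above concrete, a reduced self-contained mock showing how createValues collapses to a [1, 1, 1, N] tensor over the raw bytes, exactly as the header's inline definition does (TensorBlob and the free createTensorInternal are stand-ins; the real method copies into graph const nodes):

#include <cstdint>
#include <iostream>
#include <vector>

struct TensorBlob { uint32_t b, h, w, d; std::vector<uint8_t> bytes; };

// stand-in for Model::createTensorInternal
static TensorBlob createTensorInternal(uint32_t b, uint32_t h, uint32_t w, uint32_t d,
                                       const uint8_t* data, size_t len) {
    return {b, h, w, d, std::vector<uint8_t>(data, data + len)};
}

// mirrors the header: createValues is createTensor(1, 1, 1, values.size(), values)
template <typename Type>
TensorBlob createValues(const std::vector<Type>& values) {
    return createTensorInternal(1, 1, 1, static_cast<uint32_t>(values.size()),
                                reinterpret_cast<const uint8_t*>(values.data()),
                                values.size() * sizeof(Type));
}

int main() {
    TensorBlob newdims = createValues<int32_t>({224, 224});
    std::cout << newdims.d << " values, " << newdims.bytes.size() << " bytes\n";  // 2 values, 8 bytes
    return 0;
}
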
diff --git a/1.0/HexagonOperations.h b/1.0/HexagonOperations.h
index 1db41f0..a19a236 100644
--- a/1.0/HexagonOperations.h
+++ b/1.0/HexagonOperations.h
@@ -17,11 +17,11 @@
 #ifndef ANDROID_HARDWARE_V1_0_HEXAGON_OPERATIONS_H
 #define ANDROID_HARDWARE_V1_0_HEXAGON_OPERATIONS_H
 
-#include "HexagonUtils.h"
-#include "hexagon_nn_controller/hexagon_nn_controller.h"
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <functional>
 #include <map>
+#include "HexagonUtils.h"
+#include "hexagon_nn_controller/hexagon_nn_controller.h"
 
 namespace android {
 namespace hardware {
@@ -36,25 +36,20 @@
 using ::android::hardware::neuralnetworks::V1_0::Operand;
 
 using OperationTuple = std::pair<OperationType, OperandType>;
-using HexagonPrepareFn = std::function<bool(const std::vector<uint32_t>& /* ins */,
-                                            const std::vector<uint32_t>& /* outs */,
-                                            HexagonModel* /* model */)>;
-using OperationPrepareTable = std::map<OperationTuple, HexagonPrepareFn>;
 
-OperationPrepareTable& getOperationPrepareTable();
+using HexagonOperationFn =
+    std::function<bool(const std::vector<uint32_t>& /* ins */,
+                       const std::vector<uint32_t>& /* outs */, HexagonModel* /* model */)>;
 
-using HexagonCheckFn = std::function<bool(const std::vector<uint32_t>& /* ins */,
-                                          const std::vector<uint32_t>& /* outs */,
-                                          HexagonModel* /* model */)>;
-using OperationCheckTable = std::map<OperationType, HexagonCheckFn>;
+using OperationTable = std::map<OperationTuple, HexagonOperationFn>;
+OperationTable& getOperationPrepareTable();
+OperationTable& getOperationCheckTable();
 
-OperationCheckTable& getOperationCheckTable();
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_V1_0_HEXAGON_OPERATIONS_H
+#endif  // ANDROID_HARDWARE_V1_0_HEXAGON_OPERATIONS_H
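
A small self-contained sketch of consulting the unified OperationTable above; OperationType, OperandType, and HexagonModel are mocked here, but the tuple key and handler signature match the declarations in this header:

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

enum class OperationType { ADD };  // stand-ins for the NNAPI enums
enum class OperandType { TENSOR_QUANT8_ASYMM };
struct HexagonModel {};

using OperationTuple = std::pair<OperationType, OperandType>;
using HexagonOperationFn = std::function<bool(const std::vector<uint32_t>&,
                                              const std::vector<uint32_t>&, HexagonModel*)>;
using OperationTable = std::map<OperationTuple, HexagonOperationFn>;

int main() {
    OperationTable table = {
        {{OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM},
         [](const std::vector<uint32_t>&, const std::vector<uint32_t>&, HexagonModel*) {
             return true;  // a real entry would validate shapes or emit graph nodes
         }},
    };
    HexagonModel model;
    auto it = table.find({OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM});
    const bool supported = it != table.end() && it->second({0, 1, 2}, {3}, &model);
    std::cout << (supported ? "supported" : "unsupported") << "\n";
    return 0;
}
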
diff --git a/1.0/HexagonOperationsCheck.cpp b/1.0/HexagonOperationsCheck.cpp
index 23e01f8..d900c2b 100644
--- a/1.0/HexagonOperationsCheck.cpp
+++ b/1.0/HexagonOperationsCheck.cpp
@@ -39,25 +39,23 @@
     // get output size
     const Shape in1Shape = model->getShape(ins[0]);
     const Shape in2Shape = model->getShape(ins[1]);
-    Shape outShape       = model->getShape(outs[0]);
+    Shape outShape = model->getShape(outs[0]);
     HEXAGON_SOFT_ASSERT(addMulPrepare(in1Shape, in2Shape, &outShape), "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
     return true;
 }
 
-bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-         HexagonModel* model) {
+bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
     return addMul(ins, outs, model, OperationType::ADD);
 }
 
-bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-         HexagonModel* model) {
+bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
     return addMul(ins, outs, model, OperationType::MUL);
 }
 
-bool pool(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-          HexagonModel* model, OperationType op) {
+bool pool(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model,
+          OperationType op) {
     HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                         "Need 7 or 10 inputs for " << toString(op));
 
@@ -76,28 +74,25 @@
 
     // get parameters
     if (ins.size() == 10) {
-        padding_left   = model->getScalar<int32_t>(ins[1]);
-        padding_right  = model->getScalar<int32_t>(ins[2]);
-        padding_top    = model->getScalar<int32_t>(ins[3]);
+        padding_left = model->getScalar<int32_t>(ins[1]);
+        padding_right = model->getScalar<int32_t>(ins[2]);
+        padding_top = model->getScalar<int32_t>(ins[3]);
         padding_bottom = model->getScalar<int32_t>(ins[4]);
-        stride_width   = model->getScalar<int32_t>(ins[5]);
-        stride_height  = model->getScalar<int32_t>(ins[6]);
-        filter_width   = model->getScalar<int32_t>(ins[7]);
-        filter_height  = model->getScalar<int32_t>(ins[8]);
+        stride_width = model->getScalar<int32_t>(ins[5]);
+        stride_height = model->getScalar<int32_t>(ins[6]);
+        filter_width = model->getScalar<int32_t>(ins[7]);
+        filter_height = model->getScalar<int32_t>(ins[8]);
 
         HEXAGON_SOFT_ASSERT_NE(getPadding(inShape.dimensions[2], inShape.dimensions[1],
-                                          stride_width, stride_height,
-                                          filter_width, filter_height,
-                                          padding_left, padding_right,
-                                          padding_top, padding_bottom),
+                                          stride_width, stride_height, filter_width, filter_height,
+                                          padding_left, padding_right, padding_top, padding_bottom),
                                NN_PAD_NA, "Unknown padding");
-    }
-    else {
+    } else {
         const int32_t padding_implicit = model->getScalar<int32_t>(ins[1]);
-        stride_width                   = model->getScalar<int32_t>(ins[2]);
-        stride_height                  = model->getScalar<int32_t>(ins[3]);
-        filter_width                   = model->getScalar<int32_t>(ins[4]);
-        filter_height                  = model->getScalar<int32_t>(ins[5]);
+        stride_width = model->getScalar<int32_t>(ins[2]);
+        stride_height = model->getScalar<int32_t>(ins[3]);
+        filter_width = model->getScalar<int32_t>(ins[4]);
+        filter_height = model->getScalar<int32_t>(ins[5]);
 
         nn::calculateExplicitPadding(inShape.dimensions[2], stride_width, filter_width,
                                      padding_implicit, &padding_left, &padding_right);
@@ -107,10 +102,10 @@
 
     // get output size
     Shape outShape = model->getShape(outs[0]);
-    HEXAGON_SOFT_ASSERT(genericPoolingPrepare(inShape, padding_left, padding_right, padding_top,
-                                              padding_bottom, stride_width, stride_height,
-                                              filter_width, filter_height, &outShape),
-                        "Error getting shape");
+    HEXAGON_SOFT_ASSERT(
+        genericPoolingPrepare(inShape, padding_left, padding_right, padding_top, padding_bottom,
+                              stride_width, stride_height, filter_width, filter_height, &outShape),
+        "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
     return true;
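
For the implicit-padding branch above, a simplified self-contained sketch of turning a SAME/VALID scheme into explicit padding in the spirit of nn::calculateExplicitPadding (the real helper lives in the NN common code; the enum values below are assumptions):

#include <cstdint>
#include <iostream>

enum ImplicitPadding { kPaddingSame = 1, kPaddingValid = 2 };  // assumed values

static void explicitPadding(int32_t inSize, int32_t stride, int32_t filterSize,
                            int32_t scheme, int32_t* head, int32_t* tail) {
    *head = *tail = 0;  // VALID pads nothing
    if (scheme == kPaddingSame) {
        const int32_t outSize = (inSize + stride - 1) / stride;  // ceil(in / stride)
        const int32_t needed = (outSize - 1) * stride + filterSize - inSize;
        if (needed > 0) {
            *head = needed / 2;  // the extra pixel, if any, goes on the tail
            *tail = needed - *head;
        }
    }
}

int main() {
    int32_t left, right;
    explicitPadding(/*width=*/224, /*stride=*/2, /*filter=*/3, kPaddingSame, &left, &right);
    std::cout << left << " " << right << "\n";  // 0 1
    return 0;
}
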
@@ -160,9 +155,9 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);
 
     // setup shapes
-    const Shape inputShape  = model->getShape(ins[0]);
+    const Shape inputShape = model->getShape(ins[0]);
     const Shape filterShape = model->getShape(ins[1]);
-    const Shape biasShape   = model->getShape(ins[2]);
+    const Shape biasShape = model->getShape(ins[2]);
 
     // setup parameters
     int32_t padding_left;
@@ -174,42 +169,41 @@
 
     // get parameters
     if (ins.size() == 10) {
-        padding_left   = model->getScalar<int32_t>(ins[3]);
-        padding_right  = model->getScalar<int32_t>(ins[4]);
-        padding_top    = model->getScalar<int32_t>(ins[5]);
+        padding_left = model->getScalar<int32_t>(ins[3]);
+        padding_right = model->getScalar<int32_t>(ins[4]);
+        padding_top = model->getScalar<int32_t>(ins[5]);
         padding_bottom = model->getScalar<int32_t>(ins[6]);
-        stride_width   = model->getScalar<int32_t>(ins[7]);
-        stride_height  = model->getScalar<int32_t>(ins[8]);
+        stride_width = model->getScalar<int32_t>(ins[7]);
+        stride_height = model->getScalar<int32_t>(ins[8]);
 
-        HEXAGON_SOFT_ASSERT_NE(getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                                          stride_width, stride_height, filterShape.dimensions[2],
-                                          filterShape.dimensions[1], padding_left, padding_right,
-                                          padding_top, padding_bottom),
-                               NN_PAD_NA, "Unknown padding");
-    }
-    else {
+        HEXAGON_SOFT_ASSERT_NE(
+            getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                       stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
+                       padding_left, padding_right, padding_top, padding_bottom),
+            NN_PAD_NA, "Unknown padding");
+    } else {
         const int32_t padding_implicit = model->getScalar<int32_t>(ins[3]);
-        stride_width                   = model->getScalar<int32_t>(ins[4]);
-        stride_height                  = model->getScalar<int32_t>(ins[5]);
+        stride_width = model->getScalar<int32_t>(ins[4]);
+        stride_height = model->getScalar<int32_t>(ins[5]);
 
         nn::calculateExplicitPadding(inputShape.dimensions[2], stride_width,
-                                     filterShape.dimensions[2], padding_implicit,
-                                     &padding_left, &padding_right);
+                                     filterShape.dimensions[2], padding_implicit, &padding_left,
+                                     &padding_right);
         nn::calculateExplicitPadding(inputShape.dimensions[1], stride_height,
-                                     filterShape.dimensions[1], padding_implicit,
-                                     &padding_top, &padding_bottom);
+                                     filterShape.dimensions[1], padding_implicit, &padding_top,
+                                     &padding_bottom);
     }
 
     // get output size
     Shape outShape = model->getShape(outs[0]);
-    HEXAGON_SOFT_ASSERT(convPrepare(inputShape, filterShape, biasShape, padding_left,
-                                    padding_right, padding_top, padding_bottom, stride_width,
-                                    stride_height, &outShape), "Error getting shape");
+    HEXAGON_SOFT_ASSERT(
+        convPrepare(inputShape, filterShape, biasShape, padding_left, padding_right, padding_top,
+                    padding_bottom, stride_width, stride_height, &outShape),
+        "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
     // enforce filter is a constant
-    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]),
-                        name << "requires filter to be constant data");
+    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]), name << " requires filter to be constant data");
 
     return true;
 }
@@ -221,9 +215,9 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);
 
     // setup shapes
-    const Shape inputShape  = model->getShape(ins[0]);
+    const Shape inputShape = model->getShape(ins[0]);
     const Shape filterShape = model->getShape(ins[1]);
-    const Shape biasShape   = model->getShape(ins[2]);
+    const Shape biasShape = model->getShape(ins[2]);
 
     // setup parameters
     int32_t padding_left;
@@ -235,44 +229,42 @@
 
     // get parameters
     if (ins.size() == 11) {
-        padding_left   = model->getScalar<int32_t>(ins[3]);
-        padding_right  = model->getScalar<int32_t>(ins[4]);
-        padding_top    = model->getScalar<int32_t>(ins[5]);
+        padding_left = model->getScalar<int32_t>(ins[3]);
+        padding_right = model->getScalar<int32_t>(ins[4]);
+        padding_top = model->getScalar<int32_t>(ins[5]);
         padding_bottom = model->getScalar<int32_t>(ins[6]);
-        stride_width   = model->getScalar<int32_t>(ins[7]);
-        stride_height  = model->getScalar<int32_t>(ins[8]);
+        stride_width = model->getScalar<int32_t>(ins[7]);
+        stride_height = model->getScalar<int32_t>(ins[8]);
 
-        HEXAGON_SOFT_ASSERT_NE(getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                                          stride_width, stride_height, filterShape.dimensions[2],
-                                          filterShape.dimensions[1], padding_left, padding_right,
-                                          padding_top, padding_bottom),
-                               NN_PAD_NA, "Unknown padding");
+        HEXAGON_SOFT_ASSERT_NE(
+            getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                       stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
+                       padding_left, padding_right, padding_top, padding_bottom),
+            NN_PAD_NA, "Unknown padding");
 
-    }
-    else {
+    } else {
         const int32_t padding_implicit = model->getScalar<int32_t>(ins[3]);
-        stride_width                   = model->getScalar<int32_t>(ins[4]);
-        stride_height                  = model->getScalar<int32_t>(ins[5]);
+        stride_width = model->getScalar<int32_t>(ins[4]);
+        stride_height = model->getScalar<int32_t>(ins[5]);
 
         nn::calculateExplicitPadding(inputShape.dimensions[2], stride_width,
-                                     filterShape.dimensions[2], padding_implicit,
-                                     &padding_left, &padding_right);
+                                     filterShape.dimensions[2], padding_implicit, &padding_left,
+                                     &padding_right);
         nn::calculateExplicitPadding(inputShape.dimensions[1], stride_height,
-                                     filterShape.dimensions[1], padding_implicit,
-                                     &padding_top, &padding_bottom);
+                                     filterShape.dimensions[1], padding_implicit, &padding_top,
+                                     &padding_bottom);
     }
 
     // get output size
     Shape outShape = model->getShape(outs[0]);
-    HEXAGON_SOFT_ASSERT(depthwiseConvPrepare(inputShape, filterShape, biasShape, padding_left,
-                                             padding_right, padding_top, padding_bottom,
-                                             stride_width, stride_height, &outShape),
-                        "Error getting shape");
+    HEXAGON_SOFT_ASSERT(
+        depthwiseConvPrepare(inputShape, filterShape, biasShape, padding_left, padding_right,
+                             padding_top, padding_bottom, stride_width, stride_height, &outShape),
+        "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
     // enforce filter is a constant
-    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]),
-                        name << " requires filter to be constant data");
+    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]), name << " requires filter to be constant data");
 
     return true;
 }
@@ -285,7 +277,7 @@
 
     // get output size
     const Shape inputShape = model->getShape(ins[0]);
-    Shape outShape         = model->getShape(outs[0]);
+    Shape outShape = model->getShape(outs[0]);
 
     HEXAGON_SOFT_ASSERT(dequantizePrepare(inputShape, &outShape), "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
@@ -300,31 +292,29 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);
 
     // get output size
-    const Shape inputShape   = model->getShape(ins[0]);
+    const Shape inputShape = model->getShape(ins[0]);
     const Shape weightsShape = model->getShape(ins[1]);
-    const Shape biasShape    = model->getShape(ins[2]);
-    Shape outShape           = model->getShape(outs[0]);
+    const Shape biasShape = model->getShape(ins[2]);
+    Shape outShape = model->getShape(outs[0]);
     HEXAGON_SOFT_ASSERT(fullyConnectedPrepare(inputShape, weightsShape, biasShape, &outShape),
                         "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
     // enforce weight is a constant
-    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]),
-                        name << "requires weight to be constant data");
+    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]), name << " requires weight to be constant data");
 
     return true;
 }
 
 bool local_response_normalization(const std::vector<uint32_t>& ins,
-                                  const std::vector<uint32_t>& outs,
-                                  HexagonModel* model) {
+                                  const std::vector<uint32_t>& outs, HexagonModel* model) {
     std::string name = toString(OperationType::LOCAL_RESPONSE_NORMALIZATION);
     HEXAGON_SOFT_ASSERT_EQ(5, ins.size(), "Need 5 inputs for " << name);
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);
 
     // get output size
     const Shape inShape = model->getShape(ins[0]);
-    Shape outShape      = model->getShape(outs[0]);
+    Shape outShape = model->getShape(outs[0]);
     HEXAGON_SOFT_ASSERT(genericNormalizationPrepare(inShape, &outShape), "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
@@ -333,13 +323,13 @@
 
 bool activation(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                 HexagonModel* model, uint32_t numInputs, OperationType op) {
-    HEXAGON_SOFT_ASSERT_EQ(numInputs, ins.size(), "Need " << numInputs << " input for "
-                                                          << toString(op));
+    HEXAGON_SOFT_ASSERT_EQ(numInputs, ins.size(),
+                           "Need " << numInputs << " input for " << toString(op));
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << toString(op));
 
     // get output size
     const Shape inShape = model->getShape(ins[0]);
-    Shape outShape      = model->getShape(outs[0]);
+    Shape outShape = model->getShape(outs[0]);
     HEXAGON_SOFT_ASSERT(genericActivationPrepare(inShape, &outShape), "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
 
@@ -383,11 +373,11 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);
 
     // get output size
-    const Shape inShape           = model->getShape(ins[0]);
-    const Shape targetShape       = model->getShape(ins[1]);
+    const Shape inShape = model->getShape(ins[0]);
+    const Shape targetShape = model->getShape(ins[1]);
     const int32_t* targetShapePtr = model->getPointer(ins[1]);
-    int32_t targetShapeNumElem    = ::android::nn::getNumberOfElements(targetShape);
-    Shape outShape                = model->getShape(outs[0]);
+    int32_t targetShapeNumElem = ::android::nn::getNumberOfElements(targetShape);
+    Shape outShape = model->getShape(outs[0]);
     HEXAGON_SOFT_ASSERT(targetShapePtr != nullptr, "pointer value is currently nullptr");
 
     HEXAGON_SOFT_ASSERT(reshapePrepare(inShape, targetShapePtr, targetShapeNumElem, &outShape),
@@ -404,12 +394,12 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);
 
     // get parameters
-    const int32_t width  = model->getScalar<int32_t>(ins[1]);
+    const int32_t width = model->getScalar<int32_t>(ins[1]);
     const int32_t height = model->getScalar<int32_t>(ins[2]);
 
     // get output size
-    const Shape inShape  = model->getShape(ins[0]);
-    Shape outShape       = model->getShape(outs[0]);
+    const Shape inShape = model->getShape(ins[0]);
+    Shape outShape = model->getShape(outs[0]);
     HEXAGON_SOFT_ASSERT(resizeBilinearPrepare(inShape, width, height, &outShape),
                         "Error getting shape");
     HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");
@@ -419,44 +409,88 @@
 
 }  // namespace
 
-OperationCheckTable& getOperationCheckTable() {
-    static OperationCheckTable table = {
-        {OperationType::ADD,                          add                         },
-        {OperationType::AVERAGE_POOL_2D,              average_pool_2d             },
-        {OperationType::CONCATENATION,                concatenation               },
-        {OperationType::CONV_2D,                      conv_2d                     },
-        {OperationType::DEPTHWISE_CONV_2D,            depthwise_conv_2d           },
-//      {OperationType::DEPTH_TO_SPACE,               depth_to_space              },
-        {OperationType::DEQUANTIZE,                   dequantize                  },
-//      {OperationType::EMBEDDING_LOOKUP,             embedding_lookup            },
-//      {OperationType::FLOOR,                        floor                       },
-        {OperationType::FULLY_CONNECTED,              fully_connected             },
-//      {OperationType::HASHTABLE_LOOKUP,             hashtable_lookup            },
-//      {OperationType::L2_NORMALIZATION,             l2_normalization            },
-        {OperationType::L2_POOL_2D,                   l2_pool_2d                  },
-        {OperationType::LOCAL_RESPONSE_NORMALIZATION, local_response_normalization},
-        {OperationType::LOGISTIC,                     logistic                    },
-//      {OperationType::LSH_PROJECTION,               lsh_projection              },
-//      {OperationType::LSTM,                         lstm                        },
-        {OperationType::MAX_POOL_2D,                  max_pool_2d                 },
-        {OperationType::MUL,                          mul                         },
-        {OperationType::RELU,                         relu                        },
-        {OperationType::RELU1,                        relu1                       },
-        {OperationType::RELU6,                        relu6                       },
-        {OperationType::RESHAPE,                      reshape                     },
-        {OperationType::RESIZE_BILINEAR,              resize_bilinear             },
-//      {OperationType::RNN,                          rnn                         },
-        {OperationType::SOFTMAX,                      softmax                     },
-//      {OperationType::SPACE_TO_DEPTH,               space_to_depth              },
-//      {OperationType::SVDF,                         svdf                        },
-        {OperationType::TANH,                         tanh                        },
+OperationTable& getOperationCheckTable() {
+    static OperationTable table = {
+        // NOTE: the operations commented out inline below are valid for the
+        // Android O NNAPI release but are not currently implemented in HVX.
+
+        // -------------------------- 32-BIT FLOAT ----------------------------
+        // HVX is only performant when running on quantized values. Further, as
+        // an optimization, the current HVX driver will convert some floating
+        // point tensors into quantized values, perform the operation, and then
+        // convert them back to floating point. This causes a loss of precision
+        // that makes some tests fail (see the round-trip sketch after this
+        // file's diff). For these reasons, the FLOAT32 operations are
+        // temporarily disabled.
+        /*
+        {{OperationType::ADD, OperandType::TENSOR_FLOAT32}, add},
+        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_FLOAT32}, average_pool_2d},
+        {{OperationType::CONCATENATION, OperandType::TENSOR_FLOAT32}, concatenation},
+        {{OperationType::CONV_2D, OperandType::TENSOR_FLOAT32}, conv_2d},
+        {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_FLOAT32}, depthwise_conv_2d},
+        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_FLOAT32}, depth_to_space},
+        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_FLOAT32}, embedding_lookup},
+        //{{OperationType::FLOOR, OperandType::TENSOR_FLOAT32}, floor},
+        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_FLOAT32}, fully_connected},
+        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_FLOAT32}, hashtable_lookup},
+        //{{OperationType::L2_NORMALIZATION, OperandType::TENSOR_FLOAT32}, l2_normalization},
+        {{OperationType::L2_POOL_2D, OperandType::TENSOR_FLOAT32}, l2_pool_2d},
+        {{OperationType::LOCAL_RESPONSE_NORMALIZATION, OperandType::TENSOR_FLOAT32},
+          local_response_normalization},
+        {{OperationType::LOGISTIC, OperandType::TENSOR_FLOAT32}, logistic},
+        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_FLOAT32}, lsh_projection},
+        //{{OperationType::LSTM, OperandType::TENSOR_FLOAT32}, lstm },
+        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_FLOAT32}, max_pool_2d},
+        {{OperationType::MUL, OperandType::TENSOR_FLOAT32}, mul},
+        {{OperationType::RELU, OperandType::TENSOR_FLOAT32}, relu},
+        {{OperationType::RELU1, OperandType::TENSOR_FLOAT32}, relu1},
+        {{OperationType::RELU6, OperandType::TENSOR_FLOAT32}, relu6},
+        {{OperationType::RESHAPE, OperandType::TENSOR_FLOAT32}, reshape},
+        {{OperationType::RESIZE_BILINEAR, OperandType::TENSOR_FLOAT32}, resize_bilinear},
+        //{{OperationType::RNN, OperandType::TENSOR_FLOAT32}, rnn},
+        {{OperationType::SOFTMAX, OperandType::TENSOR_FLOAT32}, softmax},
+        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_FLOAT32}, space_to_depth},
+        //{{OperationType::SVDF, OperandType::TENSOR_FLOAT32}, svdf },
+        {{OperationType::TANH, OperandType::TENSOR_FLOAT32}, tanh},
+        */
+
+        // -------------------- QUANTIZED 8-BIT ASYMMETRICAL ------------------
+        {{OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM}, add},
+        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM}, average_pool_2d},
+        {{OperationType::CONCATENATION, OperandType::TENSOR_QUANT8_ASYMM}, concatenation},
+        {{OperationType::CONV_2D, OperandType::TENSOR_QUANT8_ASYMM}, conv_2d},
+        {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_QUANT8_ASYMM}, depthwise_conv_2d},
+        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_QUANT8_ASYMM}, depth_to_space},
+        {{OperationType::DEQUANTIZE, OperandType::TENSOR_QUANT8_ASYMM}, dequantize},
+        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM}, embedding_lookup},
+        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_QUANT8_ASYMM}, fully_connected},
+        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM}, hashtable_lookup},
+        {{OperationType::LOGISTIC, OperandType::TENSOR_QUANT8_ASYMM}, logistic},
+        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_QUANT8_ASYMM}, lsh_projection},
+        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM}, max_pool_2d},
+        {{OperationType::MUL, OperandType::TENSOR_QUANT8_ASYMM}, mul},
+        {{OperationType::RELU, OperandType::TENSOR_QUANT8_ASYMM}, relu},
+        {{OperationType::RELU1, OperandType::TENSOR_QUANT8_ASYMM}, relu1},
+        {{OperationType::RELU6, OperandType::TENSOR_QUANT8_ASYMM}, relu6},
+        {{OperationType::RESHAPE, OperandType::TENSOR_QUANT8_ASYMM}, reshape},
+        {{OperationType::SOFTMAX, OperandType::TENSOR_QUANT8_ASYMM}, softmax},
+        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_QUANT8_ASYMM}, space_to_depth},
     };
+
+    // The following functions are normally used by float32, but those
+    // operations have been temporarily disabled. Casting each to void marks it
+    // as deliberately unused and suppresses the compiler's unused-function
+    // warning (an error under -Werror).
+    (void)l2_pool_2d;
+    (void)local_response_normalization;
+    (void)tanh;
+    (void)resize_bilinear;
+
     return table;
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
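
The round-trip sketch referenced in the FLOAT32 comment above: quantizing a float to an 8-bit asymmetric value and back loses up to scale / 2 of precision, which is what makes the converted float32 paths fail precision tests. The scale and zeroPoint below are illustrative, not values from the driver:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
    const float scale = 0.05f;  // hypothetical quantization parameters
    const int32_t zeroPoint = 128;
    const float real = 0.333f;

    // quantize: q = clamp(round(real / scale) + zeroPoint, 0, 255)
    int32_t q = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
    q = std::min(255, std::max(0, q));

    // dequantize and measure the error, which is bounded by scale / 2
    const float back = scale * (q - zeroPoint);
    std::cout << "error = " << std::abs(back - real) << "\n";  // ~0.017
    return 0;
}
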
diff --git a/1.0/HexagonOperationsPrepare.cpp b/1.0/HexagonOperationsPrepare.cpp
index 1c2d7cd..3b8238f 100644
--- a/1.0/HexagonOperationsPrepare.cpp
+++ b/1.0/HexagonOperationsPrepare.cpp
@@ -32,8 +32,7 @@
 namespace {
 namespace float32 {
 
-bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-         HexagonModel* model) {
+bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
     HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for float32::add");
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::add");
 
@@ -41,7 +40,7 @@
     const hexagon_nn_input& in1 = model->getTensor(ins[0]);
     const hexagon_nn_input& in2 = model->getTensor(ins[1]);
 
-    const op_type act           = model->getFloatActivation(ins[2]);
+    const op_type act = model->getFloatActivation(ins[2]);
 
     // add node to graph
     return model->addFusedFloatOperation(OP_Add_f, NN_PAD_NA, {}, act, {in1, in2}, outs);
@@ -66,37 +65,36 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[1]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[2]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
-        stride_width                 = model->getScalar<int32_t>(ins[5]);
-        stride_height                = model->getScalar<int32_t>(ins[6]);
-        filter_width                 = model->getScalar<int32_t>(ins[7]);
-        filter_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getFloatActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[5]);
+        stride_height = model->getScalar<int32_t>(ins[6]);
+        filter_width = model->getScalar<int32_t>(ins[7]);
+        filter_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getFloatActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filter_width, filter_height,
-                         padding_left, padding_right, padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filter_width, filter_height, padding_left, padding_right,
+                         padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[1]);
-        stride_width  = model->getScalar<int32_t>(ins[2]);
+    } else {
+        pad = model->getPadding(ins[1]);
+        stride_width = model->getScalar<int32_t>(ins[2]);
         stride_height = model->getScalar<int32_t>(ins[3]);
-        filter_width  = model->getScalar<int32_t>(ins[4]);
+        filter_width = model->getScalar<int32_t>(ins[4]);
         filter_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getFloatActivation(ins[6]);
+        act = model->getFloatActivation(ins[6]);
     }
 
     const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
     const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
-    return model->addFloatOperationWithActivation(OP_AvgPool_f, pad, act,
-                                                  {input, window, stride}, outs);
+    return model->addFloatOperationWithActivation(OP_AvgPool_f, pad, act, {input, window, stride},
+                                                  outs);
 }
 
 bool concatenation(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -109,7 +107,7 @@
     // get parameters
     std::vector<hexagon_nn_input> inputs(numInputTensors + 1);
     for (size_t i = 0; i < numInputTensors; ++i) {
-        inputs[i+1] = model->getTensor(ins[i]);
+        inputs[i + 1] = model->getTensor(ins[i]);
     }
 
     // axis being concatenated
@@ -128,9 +126,9 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::conv_2d");
 
     // get parameters
-    const hexagon_nn_input& input  = model->getTensor(ins[0]);
-    const hexagon_nn_input  filter = model->createConvFilterTensor(ins[1]);
-    const hexagon_nn_input& bias   = model->getTensor(ins[2]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
+    const hexagon_nn_input filter = model->createConvFilterTensor(ins[1]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
 
     // setup parameters
     hexagon_nn_padding_type pad;
@@ -140,34 +138,32 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[3]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[4]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[5]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
-        stride_width                 = model->getScalar<int32_t>(ins[7]);
-        stride_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getFloatActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[7]);
+        stride_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getFloatActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
         const Shape filterShape = model->getShape(ins[1]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filterShape.dimensions[2],
-                         filterShape.dimensions[1], padding_left, padding_right,
-                         padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
+                         padding_left, padding_right, padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[3]);
-        stride_width  = model->getScalar<int32_t>(ins[4]);
+    } else {
+        pad = model->getPadding(ins[3]);
+        stride_width = model->getScalar<int32_t>(ins[4]);
         stride_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getFloatActivation(ins[6]);
+        act = model->getFloatActivation(ins[6]);
     }
 
     const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
-    return model->addFusedFloatOperation(OP_Conv2d_f, pad, bias, act,
-                                         {input, filter, stride}, outs);
+    return model->addFusedFloatOperation(OP_Conv2d_f, pad, bias, act, {input, filter, stride},
+                                         outs);
 }
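
A toy sketch of what the fused helper above is presumed to do: chain the base op, a bias add, and the requested activation into consecutive graph nodes (this is an assumption about addFusedFloatOperation's structure; all names below are hypothetical):

#include <functional>
#include <iostream>
#include <vector>

using Node = std::function<float(float)>;

int main() {
    std::vector<Node> graph;
    graph.push_back([](float x) { return x * 2.0f; });          // stand-in for OP_Conv2d_f
    graph.push_back([](float x) { return x + 1.0f; });          // fused bias add
    graph.push_back([](float x) { return x > 0 ? x : 0.0f; });  // fused relu activation
    float v = 3.0f;
    for (const Node& n : graph) v = n(v);
    std::cout << v << "\n";  // (3 * 2) + 1, then relu -> 7
    return 0;
}
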
 
 bool depthwise_conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -178,7 +174,7 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input& bias  = model->getTensor(ins[2]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
 
     const Shape filterShape = model->getShape(ins[1]);
 
@@ -191,29 +187,27 @@
 
     // get parameters
     if (ins.size() == 11) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[3]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[4]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[5]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
-        stride_width                 = model->getScalar<int32_t>(ins[7]);
-        stride_height                = model->getScalar<int32_t>(ins[8]);
-        depth_multiplier             = model->getScalar<int32_t>(ins[9]);
-        act                          = model->getFloatActivation(ins[10]);
+        stride_width = model->getScalar<int32_t>(ins[7]);
+        stride_height = model->getScalar<int32_t>(ins[8]);
+        depth_multiplier = model->getScalar<int32_t>(ins[9]);
+        act = model->getFloatActivation(ins[10]);
 
         const Shape inputShape = model->getShape(ins[0]);
         const Shape filterShape = model->getShape(ins[1]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filterShape.dimensions[2],
-                         filterShape.dimensions[1], padding_left, padding_right,
-                         padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
+                         padding_left, padding_right, padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad              = model->getPadding(ins[3]);
-        stride_width     = model->getScalar<int32_t>(ins[4]);
-        stride_height    = model->getScalar<int32_t>(ins[5]);
+    } else {
+        pad = model->getPadding(ins[3]);
+        stride_width = model->getScalar<int32_t>(ins[4]);
+        stride_height = model->getScalar<int32_t>(ins[5]);
         depth_multiplier = model->getScalar<int32_t>(ins[6]);
-        act              = model->getFloatActivation(ins[7]);
+        act = model->getFloatActivation(ins[7]);
     }
 
     const hexagon_nn_input filter = model->createDepthwiseFilterTensor(ins[1], depth_multiplier);
@@ -230,15 +224,14 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::fully_connected");
 
     // get parameters
-    const hexagon_nn_input& input   = model->getTensor(ins[0]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
     const hexagon_nn_input& weights = model->createFullyConnectedWeightTensor(ins[1]);
-    const hexagon_nn_input& bias    = model->getTensor(ins[2]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
 
-    const op_type act               = model->getFloatActivation(ins[3]);
+    const op_type act = model->getFloatActivation(ins[3]);
 
     // add node to graph
-    return model->addFusedFloatOperation(OP_MatMul_f, NN_PAD_NA, bias, act,
-                                         {input, weights}, outs);
+    return model->addFusedFloatOperation(OP_MatMul_f, NN_PAD_NA, bias, act, {input, weights}, outs);
 }
 
 bool l2_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -260,42 +253,40 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[1]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[2]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
-        stride_width                 = model->getScalar<int32_t>(ins[5]);
-        stride_height                = model->getScalar<int32_t>(ins[6]);
-        filter_width                 = model->getScalar<int32_t>(ins[7]);
-        filter_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getFloatActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[5]);
+        stride_height = model->getScalar<int32_t>(ins[6]);
+        filter_width = model->getScalar<int32_t>(ins[7]);
+        filter_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getFloatActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filter_width, filter_height,
-                         padding_left, padding_right, padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filter_width, filter_height, padding_left, padding_right,
+                         padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[1]);
-        stride_width  = model->getScalar<int32_t>(ins[2]);
+    } else {
+        pad = model->getPadding(ins[1]);
+        stride_width = model->getScalar<int32_t>(ins[2]);
         stride_height = model->getScalar<int32_t>(ins[3]);
-        filter_width  = model->getScalar<int32_t>(ins[4]);
+        filter_width = model->getScalar<int32_t>(ins[4]);
         filter_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getFloatActivation(ins[6]);
+        act = model->getFloatActivation(ins[6]);
     }
 
     const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
     const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
-    return model->addFloatOperationWithActivation(OP_L2Pool_f, pad, act,
-                                                  {input, window, stride}, outs);
+    return model->addFloatOperationWithActivation(OP_L2Pool_f, pad, act, {input, window, stride},
+                                                  outs);
 }
 
 bool local_response_normalization(const std::vector<uint32_t>& ins,
-                                  const std::vector<uint32_t>& outs,
-                                  HexagonModel* model) {
+                                  const std::vector<uint32_t>& outs, HexagonModel* model) {
     HEXAGON_SOFT_ASSERT_EQ(5, ins.size(),
                            "Need 5 inputs for float32::local_response_normalization");
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(),
@@ -303,12 +294,12 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input& bias  = model->getTensor(ins[2]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
     const hexagon_nn_input& alpha = model->getTensor(ins[3]);
-    const hexagon_nn_input& beta  = model->getTensor(ins[4]);
+    const hexagon_nn_input& beta = model->getTensor(ins[4]);
 
     // create a window tensor of shape [1, 1, 1, 2 * radius + 1] with value 1.0f
-    const int32_t radius          = model->getScalar<int32_t>(ins[1]);
+    const int32_t radius = model->getScalar<int32_t>(ins[1]);
     const hexagon_nn_input window = model->createTensor<float>(1, 1, 1, radius * 2 + 1, {1.0f});
 
     // add node to graph
@@ -346,41 +337,39 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[1]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[2]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
-        stride_width                 = model->getScalar<int32_t>(ins[5]);
-        stride_height                = model->getScalar<int32_t>(ins[6]);
-        filter_width                 = model->getScalar<int32_t>(ins[7]);
-        filter_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getFloatActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[5]);
+        stride_height = model->getScalar<int32_t>(ins[6]);
+        filter_width = model->getScalar<int32_t>(ins[7]);
+        filter_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getFloatActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filter_width, filter_height,
-                         padding_left, padding_right, padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filter_width, filter_height, padding_left, padding_right,
+                         padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[1]);
-        stride_width  = model->getScalar<int32_t>(ins[2]);
+    } else {
+        pad = model->getPadding(ins[1]);
+        stride_width = model->getScalar<int32_t>(ins[2]);
         stride_height = model->getScalar<int32_t>(ins[3]);
-        filter_width  = model->getScalar<int32_t>(ins[4]);
+        filter_width = model->getScalar<int32_t>(ins[4]);
         filter_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getFloatActivation(ins[6]);
+        act = model->getFloatActivation(ins[6]);
     }
 
     const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
     const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
-    return model->addFloatOperationWithActivation(OP_MaxPool_f, pad, act,
-                                                  {input, window, stride}, outs);
+    return model->addFloatOperationWithActivation(OP_MaxPool_f, pad, act, {input, window, stride},
+                                                  outs);
 }
 
-bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-         HexagonModel* model) {
+bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
     HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for float32::mul");
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::mul");
 
@@ -388,11 +377,10 @@
     const hexagon_nn_input& in1 = model->getTensor(ins[0]);
     const hexagon_nn_input& in2 = model->getTensor(ins[1]);
 
-    const op_type act           = model->getFloatActivation(ins[2]);
+    const op_type act = model->getFloatActivation(ins[2]);
 
     // add node to graph
-    return model->addFusedFloatOperation(OP_Mul_f, NN_PAD_NA, {}, act,
-                                         {in1, in2}, outs);
+    return model->addFusedFloatOperation(OP_Mul_f, NN_PAD_NA, {}, act, {in1, in2}, outs);
 }
 
 bool relu(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -414,8 +402,8 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input  min   = model->createScalar(-1.0f);
-    const hexagon_nn_input  max   = model->createScalar(1.0f);
+    const hexagon_nn_input min = model->createScalar(-1.0f);
+    const hexagon_nn_input max = model->createScalar(1.0f);
 
     // add node to graph
     return model->addBasicOperation(OP_Clamp_f, NN_PAD_NA, {input, min, max}, outs);
@@ -428,7 +416,7 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input  max   = model->createScalar(6.0f);
+    const hexagon_nn_input max = model->createScalar(6.0f);
 
     // add node to graph
     return model->addBasicOperation(OP_ReluX_f, NN_PAD_NA, {input, max}, outs);
@@ -440,7 +428,7 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::reshape");
 
     // get parameters
-    const hexagon_nn_input& input   = model->getTensor(ins[0]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
     const hexagon_nn_input& newdims = model->getTensor(ins[1]);
 
     // add node to graph
@@ -455,8 +443,8 @@
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
 
-    const int32_t width           = model->getScalar<int32_t>(ins[1]);
-    const int32_t height          = model->getScalar<int32_t>(ins[2]);
+    const int32_t width = model->getScalar<int32_t>(ins[1]);
+    const int32_t height = model->getScalar<int32_t>(ins[2]);
 
     const hexagon_nn_input newdim = model->createValues<int32_t>({height, width});
 
@@ -471,7 +459,7 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input& beta  = model->getTensor(ins[1]);
+    const hexagon_nn_input& beta = model->getTensor(ins[1]);
 
     // add node to graph
     return model->addBasicOperation(OP_Softmax_f, NN_PAD_NA, {input, beta}, outs);
@@ -489,20 +477,19 @@
     return model->addBasicOperation(OP_Tanh_f, NN_PAD_NA, {input}, outs);
 }
 
-}  // float32 namespace
+}  // namespace float32
 
 namespace quant8_asym {
 
-bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-         HexagonModel* model) {
+bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
     HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for quant8_asym::add");
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::add");
 
     // get parameters
-    const hexagon_nn_input& in1     = model->getTensor(ins[0]);
-    const hexagon_nn_input& in2     = model->getTensor(ins[1]);
+    const hexagon_nn_input& in1 = model->getTensor(ins[0]);
+    const hexagon_nn_input& in2 = model->getTensor(ins[1]);
 
-    const op_type act               = model->getQuantizedActivation(ins[2]);
+    const op_type act = model->getQuantizedActivation(ins[2]);
 
     const hexagon_nn_input& in1_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& in1_max = model->getQuantizationMax(ins[0]);
@@ -511,7 +498,7 @@
 
     // add node to graph
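+    // OP_QuantizedAdd_8p8to32 expects the quantization ranges grouped per
+    // input: {a, b, a_min, a_max, b_min, b_max}.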
     return model->addFusedQuant8Operation(OP_QuantizedAdd_8p8to32, NN_PAD_NA, {}, act,
-                                          {in1, in2, in1_min, in2_min, in1_max, in2_max}, outs);
+                                          {in1, in2, in1_min, in1_max, in2_min, in2_max}, outs);
 }
 
 bool average_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -533,35 +520,34 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[1]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[2]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
-        stride_width                 = model->getScalar<int32_t>(ins[5]);
-        stride_height                = model->getScalar<int32_t>(ins[6]);
-        filter_width                 = model->getScalar<int32_t>(ins[7]);
-        filter_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getQuantizedActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[5]);
+        stride_height = model->getScalar<int32_t>(ins[6]);
+        filter_width = model->getScalar<int32_t>(ins[7]);
+        filter_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getQuantizedActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filter_width, filter_height,
-                         padding_left, padding_right, padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filter_width, filter_height, padding_left, padding_right,
+                         padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[1]);
-        stride_width  = model->getScalar<int32_t>(ins[2]);
+    } else {
+        pad = model->getPadding(ins[1]);
+        stride_width = model->getScalar<int32_t>(ins[2]);
         stride_height = model->getScalar<int32_t>(ins[3]);
-        filter_width  = model->getScalar<int32_t>(ins[4]);
+        filter_width = model->getScalar<int32_t>(ins[4]);
         filter_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getQuantizedActivation(ins[6]);
+        act = model->getQuantizedActivation(ins[6]);
     }
 
     const hexagon_nn_input& in_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& in_max = model->getQuantizationMax(ins[0]);
-    const hexagon_nn_input window  = model->createShape(1, filter_height, filter_width, 1);
-    const hexagon_nn_input stride  = model->createShape(1, stride_height, stride_width, 1);
+    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
+    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
     return model->addQuant8OperationWithActivation(OP_QuantizedAvgPool_8, pad, act,
@@ -578,9 +564,9 @@
     // get parameters
     std::vector<hexagon_nn_input> inputs(numInputTensors * 3 + 1);
     for (size_t i = 0; i < numInputTensors; ++i) {
-        inputs[i+1+numInputTensors*0] = model->getTensor(ins[i]);
-        inputs[i+1+numInputTensors*1] = model->getQuantizationMin(ins[i]);
-        inputs[i+1+numInputTensors*2] = model->getQuantizationMax(ins[i]);
+        inputs[i + 1 + numInputTensors * 0] = model->getTensor(ins[i]);
+        inputs[i + 1 + numInputTensors * 1] = model->getQuantizationMin(ins[i]);
+        inputs[i + 1 + numInputTensors * 2] = model->getQuantizationMax(ins[i]);
     }
 
     // axis being concatenated
@@ -599,9 +585,9 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::conv_2d");
 
     // get parameters
-    const hexagon_nn_input& input      = model->getTensor(ins[0]);
-    const hexagon_nn_input  filter     = model->createConvFilterTensor(ins[1]);
-    const hexagon_nn_input& bias       = model->getTensor(ins[2]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
+    const hexagon_nn_input filter = model->createConvFilterTensor(ins[1]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
 
     // setup parameters
     hexagon_nn_padding_type pad;
@@ -611,40 +597,40 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[3]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[4]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[5]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
-        stride_width                 = model->getScalar<int32_t>(ins[7]);
-        stride_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getQuantizedActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[7]);
+        stride_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getQuantizedActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
         const Shape filterShape = model->getShape(ins[1]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filterShape.dimensions[2],
-                         filterShape.dimensions[1], padding_left, padding_right,
-                         padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
+                         padding_left, padding_right, padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[3]);
-        stride_width  = model->getScalar<int32_t>(ins[4]);
+    } else {
+        pad = model->getPadding(ins[3]);
+        stride_width = model->getScalar<int32_t>(ins[4]);
         stride_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getQuantizedActivation(ins[6]);
+        act = model->getQuantizedActivation(ins[6]);
     }
 
-    const hexagon_nn_input& input_min  = model->getQuantizationMin(ins[0]);
-    const hexagon_nn_input& input_max  = model->getQuantizationMax(ins[0]);
+    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
+    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
     const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);
     const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);
+    const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
+    const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);
 
     const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
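+    // The bias is passed together with its quantization range as the
+    // {bias, bias_min, bias_max} triple.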
-    return model->addFusedQuant8Operation(OP_QuantizedConv2d_8x8to32, pad, bias, act,
-                                          {input, filter, input_min, input_max,
-                                            filter_min, filter_max, stride}, outs);
+    return model->addFusedQuant8Operation(
+        OP_QuantizedConv2d_8x8to32, pad, {bias, bias_min, bias_max}, act,
+        {input, filter, input_min, input_max, filter_min, filter_max, stride}, outs);
 }
 
 bool depthwise_conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -655,7 +641,7 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input& bias  = model->getTensor(ins[2]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
 
     // setup parameters
     hexagon_nn_padding_type pad;
@@ -666,43 +652,43 @@
 
     // get parameters
     if (ins.size() == 11) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[3]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[4]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[5]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
-        stride_width                 = model->getScalar<int32_t>(ins[7]);
-        stride_height                = model->getScalar<int32_t>(ins[8]);
-        depth_multiplier             = model->getScalar<int32_t>(ins[9]);
-        act                          = model->getQuantizedActivation(ins[10]);
+        stride_width = model->getScalar<int32_t>(ins[7]);
+        stride_height = model->getScalar<int32_t>(ins[8]);
+        depth_multiplier = model->getScalar<int32_t>(ins[9]);
+        act = model->getQuantizedActivation(ins[10]);
 
         const Shape inputShape = model->getShape(ins[0]);
         const Shape filterShape = model->getShape(ins[1]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filterShape.dimensions[2],
-                         filterShape.dimensions[1], padding_left, padding_right,
-                         padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
+                         padding_left, padding_right, padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad              = model->getPadding(ins[3]);
-        stride_width     = model->getScalar<int32_t>(ins[4]);
-        stride_height    = model->getScalar<int32_t>(ins[5]);
+    } else {
+        pad = model->getPadding(ins[3]);
+        stride_width = model->getScalar<int32_t>(ins[4]);
+        stride_height = model->getScalar<int32_t>(ins[5]);
         depth_multiplier = model->getScalar<int32_t>(ins[6]);
-        act              = model->getQuantizedActivation(ins[7]);
+        act = model->getQuantizedActivation(ins[7]);
     }
 
-    const hexagon_nn_input& input_min  = model->getQuantizationMin(ins[0]);
-    const hexagon_nn_input& input_max  = model->getQuantizationMax(ins[0]);
+    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
+    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
     const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);
     const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);
+    const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
+    const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);
 
     const hexagon_nn_input filter = model->createDepthwiseFilterTensor(ins[1], depth_multiplier);
     const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
-    return model->addFusedQuant8Operation(OP_QuantizedDepthwiseConv2d_8x8to32, pad, bias, act,
-                                          {input, filter, input_min, input_max, filter_min,
-                                            filter_max, stride}, outs);
+    return model->addFusedQuant8Operation(
+        OP_QuantizedDepthwiseConv2d_8x8to32, pad, {bias, bias_min, bias_max}, act,
+        {input, filter, input_min, input_max, filter_min, filter_max, stride}, outs);
 }
 
 bool dequantize(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -711,7 +697,7 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::dequantize");
 
     // get parameters
-    const hexagon_nn_input& input     = model->getTensor(ins[0]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
@@ -726,21 +712,23 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8::fully_connected");
 
     // get parameters
-    const hexagon_nn_input& input       = model->getTensor(ins[0]);
-    const hexagon_nn_input& weights     = model->createFullyConnectedWeightTensor(ins[1]);
-    const hexagon_nn_input& bias        = model->getTensor(ins[2]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
+    const hexagon_nn_input& weights = model->createFullyConnectedWeightTensor(ins[1]);
+    const hexagon_nn_input& bias = model->getTensor(ins[2]);
 
-    const op_type act                   = model->getQuantizedActivation(ins[3]);
+    const op_type act = model->getQuantizedActivation(ins[3]);
 
-    const hexagon_nn_input& input_min   = model->getQuantizationMin(ins[0]);
-    const hexagon_nn_input& input_max   = model->getQuantizationMax(ins[0]);
+    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
+    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
     const hexagon_nn_input& weights_min = model->getQuantizationMin(ins[1]);
     const hexagon_nn_input& weights_max = model->getQuantizationMax(ins[1]);
+    const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
+    const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);
 
     // add node to graph
-    return model->addFusedQuant8Operation(OP_QuantizedMatMul_8x8to32, NN_PAD_NA, bias, act,
-                                          {input, weights, input_min, input_max,
-                                            weights_min, weights_max}, outs);
+    return model->addFusedQuant8Operation(
+        OP_QuantizedMatMul_8x8to32, NN_PAD_NA, {bias, bias_min, bias_max}, act,
+        {input, weights, input_min, input_max, weights_min, weights_max}, outs);
 }
 
 bool logistic(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -754,11 +742,11 @@
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
 
     // TFLite uses a different max value
-    const hexagon_nn_input  input_max = model->createQuantizationValue(ins[0], 256);
+    const hexagon_nn_input input_max = model->createQuantizationValue(ins[0], 256);
 
     // add node to graph
-    return model->addBasicOperation(OP_QuantizedSigmoid_8, NN_PAD_NA,
-                                    {input, input_min, input_max}, outs);
+    return model->addBasicOperation(OP_QuantizedSigmoid_8, NN_PAD_NA, {input, input_min, input_max},
+                                    outs);
 }
 
 bool max_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -768,7 +756,7 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::max_pool_2d");
 
     // get parameters
-    const hexagon_nn_input& input     = model->getTensor(ins[0]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
 
     // setup parameters
     hexagon_nn_padding_type pad;
@@ -780,52 +768,49 @@
 
     // get parameters
     if (ins.size() == 10) {
-        const int32_t padding_left   = model->getScalar<int32_t>(ins[1]);
-        const int32_t padding_right  = model->getScalar<int32_t>(ins[2]);
-        const int32_t padding_top    = model->getScalar<int32_t>(ins[3]);
+        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
+        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
+        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
         const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
-        stride_width                 = model->getScalar<int32_t>(ins[5]);
-        stride_height                = model->getScalar<int32_t>(ins[6]);
-        filter_width                 = model->getScalar<int32_t>(ins[7]);
-        filter_height                = model->getScalar<int32_t>(ins[8]);
-        act                          = model->getQuantizedActivation(ins[9]);
+        stride_width = model->getScalar<int32_t>(ins[5]);
+        stride_height = model->getScalar<int32_t>(ins[6]);
+        filter_width = model->getScalar<int32_t>(ins[7]);
+        filter_height = model->getScalar<int32_t>(ins[8]);
+        act = model->getQuantizedActivation(ins[9]);
 
         const Shape inputShape = model->getShape(ins[0]);
-        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1],
-                         stride_width, stride_height, filter_width, filter_height,
-                         padding_left, padding_right, padding_top, padding_bottom);
+        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
+                         stride_height, filter_width, filter_height, padding_left, padding_right,
+                         padding_top, padding_bottom);
         HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
-    }
-    else {
-        pad           = model->getPadding(ins[1]);
-        stride_width  = model->getScalar<int32_t>(ins[2]);
+    } else {
+        pad = model->getPadding(ins[1]);
+        stride_width = model->getScalar<int32_t>(ins[2]);
         stride_height = model->getScalar<int32_t>(ins[3]);
-        filter_width  = model->getScalar<int32_t>(ins[4]);
+        filter_width = model->getScalar<int32_t>(ins[4]);
         filter_height = model->getScalar<int32_t>(ins[5]);
-        act           = model->getQuantizedActivation(ins[6]);
+        act = model->getQuantizedActivation(ins[6]);
     }
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
-    const hexagon_nn_input window     = model->createShape(1, filter_height, filter_width, 1);
-    const hexagon_nn_input stride     = model->createShape(1, stride_height, stride_width, 1);
+    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
+    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
 
     // add node to graph
-    return model->addQuant8OperationWithActivation(OP_QuantizedMaxPool_8, pad, act,
-                                                   {input, input_min, input_max, window, stride},
-                                                   outs);
+    return model->addQuant8OperationWithActivation(
+        OP_QuantizedMaxPool_8, pad, act, {input, input_min, input_max, window, stride}, outs);
 }
 
-bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
-         HexagonModel* model) {
+bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
     HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for quant8_asym::mul");
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::mul");
 
     // get parameters
-    const hexagon_nn_input& in1     = model->getTensor(ins[0]);
-    const hexagon_nn_input& in2     = model->getTensor(ins[1]);
+    const hexagon_nn_input& in1 = model->getTensor(ins[0]);
+    const hexagon_nn_input& in2 = model->getTensor(ins[1]);
 
-    const op_type act               = model->getQuantizedActivation(ins[2]);
+    const op_type act = model->getQuantizedActivation(ins[2]);
 
     const hexagon_nn_input& in1_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& in1_max = model->getQuantizationMax(ins[0]);
@@ -843,14 +828,14 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::relu");
 
     // get parameters
-    const hexagon_nn_input& input     = model->getTensor(ins[0]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
 
     // add node to graph
-    return model->addBasicOperation(OP_QuantizedRelu_8, NN_PAD_NA,
-                                    {input, input_min, input_max}, outs);
+    return model->addBasicOperation(OP_QuantizedRelu_8, NN_PAD_NA, {input, input_min, input_max},
+                                    outs);
 }
 
 bool relu1(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -860,8 +845,8 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input  min   = model->createScalar(-1.0f);
-    const hexagon_nn_input  max   = model->createScalar(1.0f);
+    const hexagon_nn_input min = model->createScalar(-1.0f);
+    const hexagon_nn_input max = model->createScalar(1.0f);
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
@@ -878,7 +863,7 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input  max   = model->createScalar(6.0f);
+    const hexagon_nn_input max = model->createScalar(6.0f);
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
@@ -894,7 +879,7 @@
     HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::reshape");
 
     // get parameters
-    const hexagon_nn_input& input   = model->getTensor(ins[0]);
+    const hexagon_nn_input& input = model->getTensor(ins[0]);
     const hexagon_nn_input& newdims = model->getTensor(ins[1]);
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
@@ -912,7 +897,7 @@
 
     // get parameters
     const hexagon_nn_input& input = model->getTensor(ins[0]);
-    const hexagon_nn_input& beta  = model->getTensor(ins[1]);
+    const hexagon_nn_input& beta = model->getTensor(ins[1]);
 
     const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
     const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
@@ -922,84 +907,119 @@
                                     {input, input_min, input_max, beta}, outs);
 }
 
-}  // quant8_asym namespace
+}  // namespace quant8_asym
 
 }  // namespace
 
-OperationPrepareTable& getOperationPrepareTable() {
-    static OperationPrepareTable table = {
+OperationTable& getOperationPrepareTable() {
+    static OperationTable table = {
+        // NOTE: the operations commented out inline below are valid for the
+        // Android O NNAPI release, but are not currently implemented in HVX.
+
         // -------------------------- 32-BIT FLOAT ----------------------------
-        {{OperationType::ADD, OperandType::TENSOR_FLOAT32},             float32::add             },
-        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::average_pool_2d },
-        {{OperationType::CONCATENATION, OperandType::TENSOR_FLOAT32},   float32::concatenation   },
-        {{OperationType::CONV_2D, OperandType::TENSOR_FLOAT32},         float32::conv_2d         },
+        // HVX is performant only when running on quantized values. Further, as
+        // an optimization, the current HVX driver converts some floating-point
+        // tensors into quantized values, performs the operation, and then
+        // converts them back to floating point. This results in a loss of
+        // precision, causing some tests to fail. For these reasons, the FLOAT32
+        // operations are temporarily disabled.
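+        // As an illustration (with made-up numbers): at scale 0.5 and zero
+        // point 0, the float value 1.3 quantizes to round(1.3 / 0.5) = 3 and
+        // dequantizes back to 3 * 0.5 = 1.5, an error of 0.2.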
+        /*
+        {{OperationType::ADD, OperandType::TENSOR_FLOAT32}, float32::add},
+        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::average_pool_2d},
+        {{OperationType::CONCATENATION, OperandType::TENSOR_FLOAT32}, float32::concatenation},
+        {{OperationType::CONV_2D, OperandType::TENSOR_FLOAT32}, float32::conv_2d},
         {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_FLOAT32},
-                                                                       float32::depthwise_conv_2d},
-//      {{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_FLOAT32},  float32::depth_to_space  },
-//      {{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_FLOAT32},
-//                                                                      float32::embedding_lookup},
-//      {{OperationType::FLOOR, OperandType::TENSOR_FLOAT32},           float32::floor           },
-        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_FLOAT32}, float32::fully_connected },
-//      {{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_FLOAT32},
-//                                                                      float32::hashtable_lookup},
-//      {{OperationType::L2_NORMALIZATION, OperandType::TENSOR_FLOAT32},
-//                                                                      float32::l2_normalization},
-        {{OperationType::L2_POOL_2D, OperandType::TENSOR_FLOAT32},      float32::l2_pool_2d      },
+          float32::depthwise_conv_2d},
+        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_FLOAT32}, float32::depth_to_space},
+        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_FLOAT32},
+        //  float32::embedding_lookup},
+        //{{OperationType::FLOOR, OperandType::TENSOR_FLOAT32}, float32::floor},
+        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_FLOAT32}, float32::fully_connected},
+        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_FLOAT32},
+        //  float32::hashtable_lookup},
+        //{{OperationType::L2_NORMALIZATION, OperandType::TENSOR_FLOAT32},
+        //  float32::l2_normalization},
+        {{OperationType::L2_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::l2_pool_2d},
         {{OperationType::LOCAL_RESPONSE_NORMALIZATION, OperandType::TENSOR_FLOAT32},
-                                                            float32::local_response_normalization},
-        {{OperationType::LOGISTIC, OperandType::TENSOR_FLOAT32},        float32::logistic        },
-//      {{OperationType::LSH_PROJECTION, OperandType::TENSOR_FLOAT32},  float32::lsh_projection  },
-//      {{OperationType::LSTM, OperandType::TENSOR_FLOAT32},            float32::lstm            },
-        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_FLOAT32},     float32::max_pool_2d     },
-        {{OperationType::MUL, OperandType::TENSOR_FLOAT32},             float32::mul             },
-        {{OperationType::RELU, OperandType::TENSOR_FLOAT32},            float32::relu            },
-        {{OperationType::RELU1, OperandType::TENSOR_FLOAT32},           float32::relu1           },
-        {{OperationType::RELU6, OperandType::TENSOR_FLOAT32},           float32::relu6           },
-        {{OperationType::RESHAPE, OperandType::TENSOR_FLOAT32},         float32::reshape         },
-        {{OperationType::RESIZE_BILINEAR, OperandType::TENSOR_FLOAT32}, float32::resize_bilinear },
-//      {{OperationType::RNN, OperandType::TENSOR_FLOAT32},             float32::rnn             },
-        {{OperationType::SOFTMAX, OperandType::TENSOR_FLOAT32},         float32::softmax         },
-//      {{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_FLOAT32},  float32::space_to_depth  },
-//      {{OperationType::SVDF, OperandType::TENSOR_FLOAT32},            float32::svdf            },
-        {{OperationType::TANH, OperandType::TENSOR_FLOAT32},            float32::tanh            },
+          float32::local_response_normalization},
+        {{OperationType::LOGISTIC, OperandType::TENSOR_FLOAT32}, float32::logistic},
+        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_FLOAT32}, float32::lsh_projection},
+        //{{OperationType::LSTM, OperandType::TENSOR_FLOAT32}, float32::lstm},
+        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::max_pool_2d},
+        {{OperationType::MUL, OperandType::TENSOR_FLOAT32}, float32::mul},
+        {{OperationType::RELU, OperandType::TENSOR_FLOAT32}, float32::relu},
+        {{OperationType::RELU1, OperandType::TENSOR_FLOAT32}, float32::relu1},
+        {{OperationType::RELU6, OperandType::TENSOR_FLOAT32}, float32::relu6},
+        {{OperationType::RESHAPE, OperandType::TENSOR_FLOAT32}, float32::reshape},
+        {{OperationType::RESIZE_BILINEAR, OperandType::TENSOR_FLOAT32}, float32::resize_bilinear},
+        //{{OperationType::RNN, OperandType::TENSOR_FLOAT32}, float32::rnn},
+        {{OperationType::SOFTMAX, OperandType::TENSOR_FLOAT32}, float32::softmax},
+        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_FLOAT32}, float32::space_to_depth},
+        //{{OperationType::SVDF, OperandType::TENSOR_FLOAT32}, float32::svdf},
+        {{OperationType::TANH, OperandType::TENSOR_FLOAT32}, float32::tanh},
+        */
 
         // -------------------- QUANTIZED 8-BIT ASYMMETRICAL ------------------
-        {{OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM},     quant8_asym::add            },
+        {{OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::add},
         {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM},
-                                                                     quant8_asym::average_pool_2d},
+         quant8_asym::average_pool_2d},
         {{OperationType::CONCATENATION, OperandType::TENSOR_QUANT8_ASYMM},
-                                                                     quant8_asym::concatenation  },
-        {{OperationType::CONV_2D, OperandType::TENSOR_QUANT8_ASYMM},    quant8_asym::conv_2d     },
+         quant8_asym::concatenation},
+        {{OperationType::CONV_2D, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::conv_2d},
         {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_QUANT8_ASYMM},
-                                                                   quant8_asym::depthwise_conv_2d},
-//      {{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_QUANT8_ASYMM},
-//                                                                   quant8_asym::depth_to_space },
-        {{OperationType::DEQUANTIZE, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::dequantize  },
-//      {{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM},
-//                                                                  quant8_asym::embedding_lookup},
+         quant8_asym::depthwise_conv_2d},
+        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_QUANT8_ASYMM},
+        //  quant8_asym::depth_to_space},
+        {{OperationType::DEQUANTIZE, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::dequantize},
+        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM},
+        //  quant8_asym::embedding_lookup},
         {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_QUANT8_ASYMM},
-                                                                     quant8_asym::fully_connected},
-//      {{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM},
-//                                                                  quant8_asym::hashtable_lookup},
-        {{OperationType::LOGISTIC, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::logistic      },
-//      {{OperationType::LSH_PROJECTION, OperandType::TENSOR_QUANT8_ASYMM},
-//                                                                   quant8_asym::lsh_projection },
+         quant8_asym::fully_connected},
+        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM},
+        //  quant8_asym::hashtable_lookup},
+        {{OperationType::LOGISTIC, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::logistic},
+        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_QUANT8_ASYMM},
+        //  quant8_asym::lsh_projection},
         {{OperationType::MAX_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::max_pool_2d},
-        {{OperationType::MUL, OperandType::TENSOR_QUANT8_ASYMM},     quant8_asym::mul            },
-        {{OperationType::RELU, OperandType::TENSOR_QUANT8_ASYMM},    quant8_asym::relu           },
-        {{OperationType::RELU1, OperandType::TENSOR_QUANT8_ASYMM},   quant8_asym::relu1          },
-        {{OperationType::RELU6, OperandType::TENSOR_QUANT8_ASYMM},   quant8_asym::relu6          },
-        {{OperationType::RESHAPE, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::reshape        },
-        {{OperationType::SOFTMAX, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::softmax        },
-//      {{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_QUANT8_ASYMM},
-//                                                                   quant8_asym::space_to_depth },
+        {{OperationType::MUL, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::mul},
+        {{OperationType::RELU, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::relu},
+        {{OperationType::RELU1, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::relu1},
+        {{OperationType::RELU6, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::relu6},
+        {{OperationType::RESHAPE, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::reshape},
+        {{OperationType::SOFTMAX, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::softmax},
+        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_QUANT8_ASYMM},
+        //  quant8_asym::space_to_depth},
     };
+
+    // The following functions are normally used by float32, but those
+    // operations have been temporarily disabled. Casting each function to
+    // void marks it as intentionally unused and suppresses the compiler's
+    // unused-function diagnostic.
+    (void)float32::add;
+    (void)float32::average_pool_2d;
+    (void)float32::concatenation;
+    (void)float32::conv_2d;
+    (void)float32::depthwise_conv_2d;
+    (void)float32::fully_connected;
+    (void)float32::l2_pool_2d;
+    (void)float32::local_response_normalization;
+    (void)float32::logistic;
+    (void)float32::max_pool_2d;
+    (void)float32::mul;
+    (void)float32::relu;
+    (void)float32::relu1;
+    (void)float32::relu6;
+    (void)float32::reshape;
+    (void)float32::resize_bilinear;
+    (void)float32::softmax;
+    (void)float32::tanh;
+
     return table;
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/1.0/HexagonUtils.cpp b/1.0/HexagonUtils.cpp
index a1c55d2..bab6a5e 100644
--- a/1.0/HexagonUtils.cpp
+++ b/1.0/HexagonUtils.cpp
@@ -16,12 +16,12 @@
 
 #define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"
 
-#include "OperationsUtils.h"
 #include "HexagonUtils.h"
-#include <algorithm>
 #include <hidlmemory/mapping.h>
+#include <algorithm>
 #include <numeric>
 #include <vector>
+#include "OperationsUtils.h"
 
 namespace android {
 namespace hardware {
@@ -32,7 +32,12 @@
 
 bool isHexagonAvailable() {
     int version = -1;
-    hexagon::Controller::getInstance().version(&version);
+    Controller::getInstance().version(&version);
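+    // Only nnlib interface version 92 is supported; if the DSP reports
+    // anything else, restart nnlib once and query the version again.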
+    if (version != 92) {
+        LOG(INFO) << "ATTEMPTING TO RESTART NNLIB";
+        Controller::getInstance().resetNnlib();
+        Controller::getInstance().version(&version);
+    }
     return version == 92;
 }
 
@@ -48,11 +53,10 @@
     };
 }
 
-hexagon_nn_padding_type getPadding(int32_t inWidth, int32_t inHeight,
-                                   int32_t strideWidth, int32_t strideHeight,
-                                   int32_t filterWidth, int32_t filterHeight,
-                                   int32_t paddingLeft, int32_t paddingRight,
-                                   int32_t paddingTop, int32_t paddingBottom) {
+hexagon_nn_padding_type getPadding(int32_t inWidth, int32_t inHeight, int32_t strideWidth,
+                                   int32_t strideHeight, int32_t filterWidth, int32_t filterHeight,
+                                   int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
+                                   int32_t paddingBottom) {
     return getPadding(::android::nn::getPaddingScheme(inWidth, inHeight, strideWidth, strideHeight,
                                                       filterWidth, filterHeight, paddingLeft,
                                                       paddingRight, paddingTop, paddingBottom));
@@ -90,12 +94,12 @@
 
 uint32_t getSize(OperandType type) {
     static const uint32_t sizes[] = {
-        4, // FLOAT32
-        4, // INT32
-        4, // UINT32
-        4, // TENSOR_FLOAT32
-        4, // TENSOR_INT32
-        1, // TENSOR_SYMMETRICAL_QUANT8
+        4,  // FLOAT32
+        4,  // INT32
+        4,  // UINT32
+        4,  // TENSOR_FLOAT32
+        4,  // TENSOR_INT32
+        1,  // TENSOR_SYMMETRICAL_QUANT8
     };
     HEXAGON_SOFT_ASSERT(static_cast<uint32_t>(type) < sizeof(sizes) / sizeof(*sizes),
                         "Error: type exceeds max enum value");
@@ -103,23 +107,25 @@
 }
 
 std::vector<uint32_t> getAlignedDimensions(const std::vector<uint32_t>& dims, uint32_t N) {
-    HEXAGON_SOFT_ASSERT_GE(N, dims.size(),
-                           "Error: constant data dimensions " << dims.size() <<
-                           " exceeds alignment of " << N);
+    HEXAGON_SOFT_ASSERT_GE(
+        N, dims.size(),
+        "Error: constant data dimensions " << dims.size() << " exceeds alignment of " << N);
     std::vector<uint32_t> dimensions(N - dims.size(), 1);
     dimensions.insert(dimensions.end(), dims.begin(), dims.end());
     return dimensions;
 }
 
 std::vector<RunTimePoolInfo> mapPools(const hidl_vec<hidl_memory>& pools) {
-    std::vector<RunTimePoolInfo> poolInfos(pools.size());
-    for (size_t i = 0; i < pools.size(); i++) {
-        HEXAGON_SOFT_ASSERT(poolInfos[i].set(pools[i]), "Error setting pool " << i);
+    std::vector<RunTimePoolInfo> poolInfos;
+    poolInfos.reserve(pools.size());
+    bool fail = false;
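+    // The RunTimePoolInfo constructor reports mapping failures through *fail,
+    // so a single assert after the loop covers every pool.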
+    for (const auto& pool : pools) {
+        poolInfos.emplace_back(pool, &fail);
     }
+    HEXAGON_SOFT_ASSERT(!fail, "Error setting pools");
     return poolInfos;
 }
 
-
 std::unordered_set<uint32_t> getPoolIndexes(const std::vector<RequestArgument>& inputsOutputs) {
     std::unordered_set<uint32_t> indexes;
     for (const RequestArgument& inputOutput : inputsOutputs) {
@@ -137,21 +143,21 @@
 
 const uint8_t* getDataFromPool(const RunTimePoolInfo& pool, uint32_t offset,
                                [[maybe_unused]] uint32_t length) {
-    //HEXAGON_SOFT_ASSERT_LE(offset + length, pool->getSize(),
+    // HEXAGON_SOFT_ASSERT_LE(offset + length, pool->getSize(),
     //                       "Error: trying to copy data from outside of pool bounds");
-    return pool.buffer + offset;
+    return pool.getBuffer() + offset;
 }
-} // anonymous namespace
+}  // anonymous namespace
 
 const uint8_t* getData(const Operand& operand, const hidl_vec<uint8_t>& block,
                        const std::vector<RunTimePoolInfo>& pools) {
-    switch(operand.lifetime) {
+    switch (operand.lifetime) {
         case OperandLifeTime::TEMPORARY_VARIABLE:
             return nullptr;
         case OperandLifeTime::MODEL_INPUT:
         case OperandLifeTime::MODEL_OUTPUT:
             HEXAGON_SOFT_ASSERT(false,
-                   "Error: trying to retrieve data that is only known at runtime");
+                                "Error: trying to retrieve data that is only known at runtime");
         case OperandLifeTime::CONSTANT_COPY:
             return getDataFromBlock(block, operand.location.offset, operand.location.length);
         case OperandLifeTime::CONSTANT_REFERENCE:
@@ -172,11 +178,11 @@
 
 bool operator==(const hexagon_nn_output& lhs, const hexagon_nn_output& rhs) {
     return lhs.rank == rhs.rank && lhs.max_sizes[0] == rhs.max_sizes[0] &&
-            lhs.max_sizes[1] == rhs.max_sizes[1] && lhs.max_sizes[2] == rhs.max_sizes[2] &&
-            lhs.max_sizes[3] == rhs.max_sizes[3] && lhs.max_sizes[4] == rhs.max_sizes[4] &&
-            lhs.max_sizes[5] == rhs.max_sizes[5] && lhs.max_sizes[6] == rhs.max_sizes[6] &&
-            lhs.max_sizes[7] == rhs.max_sizes[7] && lhs.elementsize == rhs.elementsize &&
-            lhs.zero_offset == rhs.zero_offset && lhs.stepsize == rhs.stepsize;
+           lhs.max_sizes[1] == rhs.max_sizes[1] && lhs.max_sizes[2] == rhs.max_sizes[2] &&
+           lhs.max_sizes[3] == rhs.max_sizes[3] && lhs.max_sizes[4] == rhs.max_sizes[4] &&
+           lhs.max_sizes[5] == rhs.max_sizes[5] && lhs.max_sizes[6] == rhs.max_sizes[6] &&
+           lhs.max_sizes[7] == rhs.max_sizes[7] && lhs.elementsize == rhs.elementsize &&
+           lhs.zero_offset == rhs.zero_offset && lhs.stepsize == rhs.stepsize;
 }
 
 bool operator!=(const hexagon_nn_output& lhs, const hexagon_nn_output& rhs) {
@@ -198,206 +204,6 @@
     return output;
 }
 
-namespace {
-
-const char* kOps[] = {
-    "OP_INPUT",
-    "OP_OUTPUT",
-    "OP_Nop",
-    "OP_Const",
-    "OP_Check",
-    "OP_Close_f",
-    "OP_Close_quint8",
-    "OP_Close_q_quint8",
-    "OP_Close_int32",
-    "OP_Close_qint32",
-    "OP_PPrint_8",
-    "OP_PPrint_32",
-    "OP_PPrint_f",
-    "OP_PreFree",
-    "OP_Flatten",
-    "OP_QuantizedConv2d_8x8to32",
-    "OP_QuantizedConv2d_8x8to32_ref",
-    "OP_QuantizedMatMul_8x8to32",
-    "OP_QuantizedMatMul_8x8to32_ref",
-    "OP_QuantizeDownAndShrinkRange_32to8",
-    "OP_QuantizeDownAndShrinkRange_32to8_ref",
-    "OP_QuantizedRelu_8",
-    "OP_QuantizedRelu_8_ref",
-    "OP_QuantizedReluX_8",
-    "OP_QuantizedReluX_8_ref",
-    "OP_QuantizedMaxPool_8",
-    "OP_QuantizedMaxPool_8_ref",
-    "OP_QuantizedAvgPool_8",
-    "OP_QuantizedAvgPool_8_ref",
-    "OP_QuantizedL2Pool_8",
-    "OP_QuantizedL2Pool_8_ref",
-    "OP_QuantizedConcat_8",
-    "OP_QuantizedConcat_8_ref",
-    "OP_QuantizedBiasAdd_8p8to32",
-    "OP_QuantizedBiasAdd_8p8to32_ref",
-    "OP_Min_f",
-    "OP_Min_f_ref",
-    "OP_Max_f",
-    "OP_Max_f_ref",
-    "OP_Quantize",
-    "OP_Quantize_ref",
-    "OP_Dequantize",
-    "OP_Dequantize_ref",
-    "OP_Supernode_8x8p8to8",
-    "OP_Supernode_8x8p8to8_ref",
-    "OP_QuantizedFlatten",
-    "OP_Softmax_f",
-    "OP_Conv2d_f",
-    "OP_MatMul_f",
-    "OP_Relu_f",
-    "OP_ReluX_f",
-    "OP_AvgPool_f",
-    "OP_L2Pool_f",
-    "OP_MaxPool_f",
-    "OP_Concat_f",
-    "OP_BiasAdd_f",
-    "OP_LRN_f",
-    "OP_Variable",
-    "OP_Assign",
-    "OP_Reshape",
-    "OP_QuantizedReshape",
-    "OP_Tanh_f",
-    "OP_Sigmoid_f",
-    "OP_Slice_8",
-    "OP_Slice_f",
-    "OP_QuantizedSlice_8",
-    "OP_Add_f",
-    "OP_Mul_f",
-    "OP_Minimum_f",
-    "OP_Maximum_f",
-    "OP_Requantize_32to8",
-    "OP_Requantize_32to8_ref",
-    "OP_RequantizationRange_32",
-    "OP_RequantizationRange_32_ref",
-    "OP_Neg_f",
-    "OP_Sub_f",
-    "OP_AddN_f",
-    "OP_Range_int32",
-    "OP_Rank_int32",
-    "OP_Transpose_int32",
-    "OP_Transpose_f",
-    "OP_InstanceNorm_f",
-    "OP_QuantizedInstanceNorm_8",
-    "OP_QuantizedInstanceNorm_8_ref",
-    "OP_Sub_int32",
-    "OP_Add_int32",
-    "OP_Split_f",
-    "OP_Dequantize_qint32_f",
-    "OP_PRelu_f",
-    "OP_QuantizedPRelu_8",
-    "OP_QuantizedPRelu_8_ref",
-    "OP_Sum_f",
-    "OP_Prod_f",
-    "OP_Mul_int32",
-    "OP_LogicalAnd_int32",
-    "OP_LogicalOr_int32",
-    "OP_LogicalXor_int32",
-    "OP_Shape_int32",
-    "OP_Pack_int32",
-    "OP_MirrorPad_f",
-    "OP_ResizeNearestNeighbor_f",
-    "OP_StridedSlice_int32",
-    "OP_StridedSlice_f",
-    "OP_ExpandDims_int32",
-    "OP_ExpandDims_f",
-    "OP_LogSoftmax_f",
-    "OP_Split_int32",
-    "OP_QuantizedSplit_8",
-    "OP_Deconv_f",
-    "OP_QuantizedDeconv_8x8to32",
-    "OP_QuantizedDeconv_8x8to32_ref",
-    "OP_QuantizedMul_8x8to32",
-    "OP_QuantizedMul_8x8to32_ref",
-    "OP_QuantizedAdd_8p8to32",
-    "OP_QuantizedAdd_8p8to32_ref",
-    "OP_QuantizedSigmoid_8",
-    "OP_QuantizedSigmoid_8_ref",
-    "OP_QuantizedTanh_8",
-    "OP_QuantizedTanh_8_ref",
-    "OP_QuantizedSoftmax_8",
-    "OP_QuantizedSoftmax_8_ref",
-    "OP_QuantizedLRN_8",
-    "OP_QuantizedLRN_8_ref",
-    "OP_Quantizedpad2d_frame_8p",
-    "OP_Quantizedpad2d_frame_8p_ref",
-    "OP_QuantizedSub_8p8to32",
-    "OP_QuantizedSub_8p8to32_ref",
-    "OP_QuantizedMaximum_8",
-    "OP_QuantizedMaximum_8_ref",
-    "OP_QuantizedMinimum_8",
-    "OP_QuantizedMinimum_8_ref",
-    "OP_Pad_f",
-    "OP_SpaceToBatchND_f",
-    "OP_BatchToSpaceND_f",
-    "OP_QuantizedPad_8",
-    "OP_ResizeBilinear_f",
-    "OP_ConcatV2_f",
-    "OP_ConcatV2_int32",
-    "OP_Prod_int32",
-    "OP_Slice_int32",
-    "OP_QuantizedAdd_8p8to8",
-    "OP_QuantizedResizeBilinear_8",
-    "OP_Supernode_8x8p8to8_d32",
-    "OP_Convert_to_d32",
-    "OP_Convert_from_d32",
-    "OP_QuantizedMaxPool_8_d32",
-    "OP_QuantizedMaxPool_8_d32_ref",
-    "OP_QuantizedConcat_8_d32",
-    "OP_QuantizedConcat_8_d32_ref",
-    "OP_QuantizedAvgPool_8_d32",
-    "OP_QuantizedAvgPool_8_d32_ref",
-    "OP_Sink",
-    "OP_QuantizedPRelu_8_d32",
-    "OP_QuantizedPRelu_8_d32_ref",
-    "OP_AutoQuantize",
-    "OP_AutoQuantize_ref",
-    "OP_QuantizedDepthwiseConv2d_8x8to32",
-    "OP_QuantizedDepthwiseConv2d_8x8to32_ref",
-    "OP_DepthwiseConv2d_f",
-    "OP_DepthwiseSupernode_8x8p8to8",
-    "OP_DepthwiseSupernode_8x8p8to8_d32",
-    "OP_QuantizedMul_8x8to8_d32",
-    "OP_QuantizedMul_8x8to8_d32_ref",
-    "OP_FullyConnected_u8",
-    "OP_QuantizedAdd_8x8to8_d32",
-    "OP_QuantizedAdd_8x8to8_d32_ref",
-    "OP_QuantizedClamp_8",
-    "OP_QuantizedClamp_8_ref",
-    "OP_Clamp_f",
-    "OP_QuantizeForTest_d32",
-    "OP_Close_d32",
-    "OP_QuantizedSub_8x8to8_d32",
-    "OP_QuantizedSub_8x8to8_d32_ref",
-    "OP_InputSupernode_8x8p8to8_outd32",
-    "OP_QuantizedLRN_8_d32",
-    "OP_QuantizedBiasAdd_32p32to32",
-    "OP_QuantizedBiasAdd_32p32to32_ref",
-    "OP_Quantize_int32",
-    "OP_Quantize_int32_ref",
-    "OP_Supernode_8x8p32to8",
-    "OP_DepthwiseSupernode_8x8p32to8",
-    "OP_Supernode_8x8p32to8_d32",
-    "OP_DepthwiseSupernode_8x8p32to8_d32",
-    "OP_InputSupernode_8x8p32to8_outd32",
-};
-
-const char* kPadding[] = {
-    "NN_PAD_NA",
-    "NN_PAD_SAME",
-    "NN_PAD_VALID",
-    "NN_PAD_MIRROR_REFLECT",
-    "NN_PAD_MIRROR_SYMMETRIC",
-    "NN_PAD_SAME_CAFFE",
-};
-
-} // anonymous namespace
-
 // printers
 std::string toString(uint32_t val) {
     return std::to_string(val);
@@ -412,63 +218,74 @@
 }
 
 std::string toString(op_type op) {
-    return static_cast<size_t>(op) < sizeof(kOps) / sizeof(char*) ?
-            kOps[static_cast<size_t>(op)] : "<invalid op_type>";
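+    // Generate the op-name table from ops.def via the DEF_OP X-macro so the
+    // strings stay in sync with the op_type enum.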
+    static const char* opText[] = {
+#define DEF_OP(NAME, ...) "OP_" #NAME,
+#include "hexagon_nn_controller/ops.def"
+#undef DEF_OP
+    };
+    return static_cast<size_t>(op) < sizeof(opText) / sizeof(char*)
+               ? opText[static_cast<size_t>(op)]
+               : "<invalid op_type>";
 }
 
 std::string toString(hexagon_nn_padding_type padding) {
-    return static_cast<size_t>(padding) < sizeof(kPadding) / sizeof(char*) ?
-            kPadding[static_cast<size_t>(padding)] : "<invalid hexagon_nn_padding_type>";
+    static const char* paddingText[] = {
+        "NN_PAD_NA",
+        "NN_PAD_SAME",
+        "NN_PAD_VALID",
+        "NN_PAD_MIRROR_REFLECT",
+        "NN_PAD_MIRROR_SYMMETRIC",
+        "NN_PAD_SAME_CAFFE",
+    };
+    return static_cast<size_t>(padding) < sizeof(paddingText) / sizeof(char*)
+               ? paddingText[static_cast<size_t>(padding)]
+               : "<invalid hexagon_nn_padding_type>";
 }
 
 std::string toString(const hexagon_nn_input& input) {
     return "hexagon_nn_input{.src_id: " + std::to_string(input.src_id) +
-            ", .output_idx: " + std::to_string(input.output_idx) + "}";
+           ", .output_idx: " + std::to_string(input.output_idx) + "}";
 }
 
 std::string toString(const hexagon_nn_output& output) {
-    return "hexagon_nn_output{.rank: " + std::to_string(output.rank) +
-            ", .max_sizes: [" + std::to_string(output.max_sizes[0]) +
-                ", " + std::to_string(output.max_sizes[1]) +
-                ", " + std::to_string(output.max_sizes[2]) +
-                ", " + std::to_string(output.max_sizes[3]) +
-                ", " + std::to_string(output.max_sizes[4]) +
-                ", " + std::to_string(output.max_sizes[5]) +
-                ", " + std::to_string(output.max_sizes[6]) +
-                ", " + std::to_string(output.max_sizes[7]) + "]" +
-            ", .elementsize: " + std::to_string(output.elementsize) +
-            ", .zero_offset: " + std::to_string(output.zero_offset) +
-            ", .stepsize: " + std::to_string(output.stepsize) + "}";
+    return "hexagon_nn_output{.rank: " + std::to_string(output.rank) + ", .max_sizes: [" +
+           std::to_string(output.max_sizes[0]) + ", " + std::to_string(output.max_sizes[1]) + ", " +
+           std::to_string(output.max_sizes[2]) + ", " + std::to_string(output.max_sizes[3]) + ", " +
+           std::to_string(output.max_sizes[4]) + ", " + std::to_string(output.max_sizes[5]) + ", " +
+           std::to_string(output.max_sizes[6]) + ", " + std::to_string(output.max_sizes[7]) + "]" +
+           ", .elementsize: " + std::to_string(output.elementsize) +
+           ", .zero_offset: " + std::to_string(output.zero_offset) +
+           ", .stepsize: " + std::to_string(output.stepsize) + "}";
 }
 
 std::string toString(const hexagon_nn_tensordef& tensordef) {
     return "hexagon_nn_tensordef{.batches: " + std::to_string(tensordef.batches) +
-            ", .height: " + std::to_string(tensordef.height) +
-            ", .width: " + std::to_string(tensordef.width) +
-            ", .depth: " + std::to_string(tensordef.depth) +
-            ", .data: " + std::to_string(reinterpret_cast<uintptr_t>(tensordef.data)) +
-            ", .dataLen: " + std::to_string(tensordef.dataLen) +
-            ", .data_valid_len: " + std::to_string(tensordef.data_valid_len) +
-            ", .unused: " + std::to_string(tensordef.unused) + "}";
+           ", .height: " + std::to_string(tensordef.height) +
+           ", .width: " + std::to_string(tensordef.width) +
+           ", .depth: " + std::to_string(tensordef.depth) +
+           ", .data: " + std::to_string(reinterpret_cast<uintptr_t>(tensordef.data)) +
+           ", .dataLen: " + std::to_string(tensordef.dataLen) +
+           ", .data_valid_len: " + std::to_string(tensordef.data_valid_len) +
+           ", .unused: " + std::to_string(tensordef.unused) + "}";
 }
 
 std::string toString(const hexagon_nn_perfinfo& perfinfo) {
     return "hexagon_nn_perfinfo{.node_id: " + std::to_string(perfinfo.node_id) +
-            ", .executions: " + std::to_string(perfinfo.executions) +
-            ", .counter_lo: " + std::to_string(perfinfo.counter_lo) +
-            ", .counter_hi: " + std::to_string(perfinfo.counter_hi) + "}";
+           ", .executions: " + std::to_string(perfinfo.executions) +
+           ", .counter_lo: " + std::to_string(perfinfo.counter_lo) +
+           ", .counter_hi: " + std::to_string(perfinfo.counter_hi) + "}";
 }
 
 std::string toString(const ::android::nn::Shape& shape) {
     return "Shape{.type: " + toString(shape.type) +
-            ", .dimensions: " + toString(shape.dimensions.data(), shape.dimensions.size()) +
-            ", .scale: " + std::to_string(shape.scale) +
-            ", .zeroPoint: " + std::to_string(shape.offset) + "}";
+           ", .dimensions: " + toString(shape.dimensions.data(), shape.dimensions.size()) +
+           ", .scale: " + std::to_string(shape.scale) +
+           ", .zeroPoint: " + std::to_string(shape.offset) + "}";
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/1.0/HexagonUtils.h b/1.0/HexagonUtils.h
index 88379d2..a50bb6f 100644
--- a/1.0/HexagonUtils.h
+++ b/1.0/HexagonUtils.h
@@ -17,31 +17,31 @@
 #ifndef ANDROID_HARDWARE_V1_0_UTILS_H
 #define ANDROID_HARDWARE_V1_0_UTILS_H
 
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <string>
+#include <unordered_set>
+#include <vector>
 #include "CpuExecutor.h"
 #include "HexagonController.h"
 #include "OperationsUtils.h"
 #include "hexagon_nn_controller/hexagon_nn_controller.h"
-#include <android/hardware/neuralnetworks/1.0/types.h>
-#include <android-base/logging.h>
-#include <unordered_set>
-#include <string>
-#include <vector>
 
-#define HEXAGON_SOFT_ASSERT(condition, message)                                                  \
-    if (!(condition)) {                                                                          \
-        LOG(DEBUG) << __FILE__ << "::" << __LINE__ << " -- " << message;                         \
-        return {};                                                                               \
+#define HEXAGON_SOFT_ASSERT(condition, message)                          \
+    if (!(condition)) {                                                  \
+        LOG(DEBUG) << __FILE__ << "::" << __LINE__ << " -- " << message; \
+        return {};                                                       \
     }
 
-#define HEXAGON_SOFT_ASSERT_CMP(cmp, lhs, rhs, message)                                          \
-    HEXAGON_SOFT_ASSERT(((lhs) cmp (rhs)),                                                       \
-        "failed "#lhs" "#cmp" "#rhs" (" << (lhs) << " "#cmp" " << (rhs) << "): " message)
+#define HEXAGON_SOFT_ASSERT_CMP(cmp, lhs, rhs, message)                        \
+    HEXAGON_SOFT_ASSERT(((lhs)cmp(rhs)), "failed " #lhs " " #cmp " " #rhs " (" \
+                                             << (lhs) << " " #cmp " " << (rhs) << "): " message)
 
 #define HEXAGON_SOFT_ASSERT_EQ(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(==, lhs, rhs, message)
 #define HEXAGON_SOFT_ASSERT_NE(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(!=, lhs, rhs, message)
-#define HEXAGON_SOFT_ASSERT_LT(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(<,  lhs, rhs, message)
+#define HEXAGON_SOFT_ASSERT_LT(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(<, lhs, rhs, message)
 #define HEXAGON_SOFT_ASSERT_LE(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(<=, lhs, rhs, message)
-#define HEXAGON_SOFT_ASSERT_GT(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(>,  lhs, rhs, message)
+#define HEXAGON_SOFT_ASSERT_GT(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(>, lhs, rhs, message)
 #define HEXAGON_SOFT_ASSERT_GE(lhs, rhs, message) HEXAGON_SOFT_ASSERT_CMP(>=, lhs, rhs, message)
 
 namespace android {
@@ -51,21 +51,20 @@
 namespace implementation {
 namespace hexagon {
 
+using ::android::sp;
 using ::android::hardware::hidl_memory;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
 using ::android::hardware::neuralnetworks::V1_0::Operand;
-using ::android::sp;
 using ::android::nn::RunTimePoolInfo;
 
 bool isHexagonAvailable();
 
 hexagon_nn_padding_type getPadding(uint32_t pad);
-hexagon_nn_padding_type getPadding(int32_t inWidth, int32_t inHeight,
-                                   int32_t strideWidth, int32_t strideHeight,
-                                   int32_t filterWidth, int32_t filterHeight,
-                                   int32_t paddingLeft, int32_t paddingRight,
-                                   int32_t paddingTop, int32_t paddingBottom);
+hexagon_nn_padding_type getPadding(int32_t inWidth, int32_t inHeight, int32_t strideWidth,
+                                   int32_t strideHeight, int32_t filterWidth, int32_t filterHeight,
+                                   int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
+                                   int32_t paddingBottom);
 op_type getFloatActivationFunction(FusedActivationFunc act);
 op_type getQuantizedActivationFunction(FusedActivationFunc act);
 
@@ -79,12 +78,12 @@
 const uint8_t* getData(const Operand& operand, const hidl_vec<uint8_t>& block,
                        const std::vector<RunTimePoolInfo>& pools);
 
-template<typename Type>
+template <typename Type>
 std::vector<Type> transpose(uint32_t height, uint32_t width, const Type* input) {
     std::vector<Type> output(height * width);
     for (uint32_t i = 0; i < height; ++i) {
         for (uint32_t j = 0; j < width; ++j) {
-            output[j*height + i] = input[i*width + j];
+            output[j * height + i] = input[i * width + j];
         }
     }
     return output;
@@ -109,7 +108,7 @@
 std::string toString(const hexagon_nn_perfinfo& perfinfo);
 std::string toString(const ::android::nn::Shape& input);
 
-template<typename Type>
+template <typename Type>
 std::string toString(const Type* buffer, uint32_t count) {
     std::string os = "[";
     for (uint32_t i = 0; i < count; ++i) {
@@ -118,47 +117,47 @@
     return os += "]";
 }
 
-template<typename CharT, typename Traits>
+template <typename CharT, typename Traits>
 std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& os,
-                                             const hexagon_nn_input& obj) {
+                                              const hexagon_nn_input& obj) {
     return os << toString(obj);
 }
 
-template<typename CharT, typename Traits>
+template <typename CharT, typename Traits>
 std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& os,
-                                             const hexagon_nn_output& obj) {
+                                              const hexagon_nn_output& obj) {
     return os << toString(obj);
 }
 
-template<typename CharT, typename Traits>
+template <typename CharT, typename Traits>
 std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& os,
-                                             const hexagon_nn_tensordef& obj) {
+                                              const hexagon_nn_tensordef& obj) {
     return os << toString(obj);
 }
 
-template<typename CharT, typename Traits>
+template <typename CharT, typename Traits>
 std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& os,
-                                             const hexagon_nn_perfinfo& obj) {
+                                              const hexagon_nn_perfinfo& obj) {
     return os << toString(obj);
 }
 
-template<typename CharT, typename Traits>
+template <typename CharT, typename Traits>
 std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& os,
-                                             const ::android::nn::Shape& obj) {
+                                              const ::android::nn::Shape& obj) {
     return os << toString(obj);
 }
 
-template<typename CharT, typename Traits>
+template <typename CharT, typename Traits>
 std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& os,
                                               ErrorStatus status) {
     return os << toString(status);
 }
 
-} // namespace hexagon
-} // namespace implementation
-} // namespace V1_0
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
+}  // namespace hexagon
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
 
-#endif // ANDROID_HARDWARE_V1_0_UTILS_H
+#endif  // ANDROID_HARDWARE_V1_0_UTILS_H
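
Note: HEXAGON_SOFT_ASSERT_CMP stringifies the operands and the comparison with
the preprocessor's # operator, so a failure logs both the expression and its
runtime values before the enclosing function returns a value-initialized
result. A minimal standalone sketch of the same pattern, using std::cerr in
place of the Android logging macros (illustrative only, not part of the HAL):

    #include <iostream>
    #include <vector>

    // Soft assert: log and return a default-constructed value, don't abort.
    #define SOFT_ASSERT(condition, message)                                \
        if (!(condition)) {                                                \
            std::cerr << __FILE__ << "::" << __LINE__ << " -- " << message \
                      << std::endl;                                        \
            return {};                                                     \
        }

    // Stringified comparison: a failure logs e.g. "failed n > 0 (0 > 0): ...".
    #define SOFT_ASSERT_CMP(cmp, lhs, rhs, message)                        \
        SOFT_ASSERT(((lhs)cmp(rhs)), "failed " #lhs " " #cmp " " #rhs " (" \
                                         << (lhs) << " " #cmp " " << (rhs) \
                                         << "): " message)

    std::vector<int> firstN(int n) {
        SOFT_ASSERT_CMP(>, n, 0, "need a positive count");  // returns {} if n <= 0
        return std::vector<int>(n, 1);
    }
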
diff --git a/1.0/PreparedModel.cpp b/1.0/PreparedModel.cpp
index 4e96c9a..c95d56f 100644
--- a/1.0/PreparedModel.cpp
+++ b/1.0/PreparedModel.cpp
@@ -16,10 +16,11 @@
 
 #define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"
 
-#include "HexagonUtils.h"
 #include "PreparedModel.h"
 #include <android-base/logging.h>
 #include <thread>
+#include "HexagonUtils.h"
+#include "ValidateHal.h"
 
 namespace android {
 namespace hardware {
@@ -28,16 +29,19 @@
 namespace implementation {
 
 PreparedModel::PreparedModel(const Model& neuralNetworksModel,
-                             const std::shared_ptr<hexagon::Model>& hexagonModel) :
-        mNeuralNetworksModel(neuralNetworksModel), mHexagonModel(hexagonModel) {}
+                             const std::shared_ptr<hexagon::Model>& hexagonModel)
+    : mNeuralNetworksModel(neuralNetworksModel), mHexagonModel(hexagonModel) {}
 
 PreparedModel::~PreparedModel() {}
 
 static void asyncExecute(const std::shared_ptr<hexagon::Model>& model, const Request& request,
                          const sp<IExecutionCallback>& callback) {
-    ErrorStatus status = model->execute(request) == true ?
-            ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE;
-    callback->notify(status);
+    ErrorStatus status =
+        model->execute(request) == true ? ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE;
+    Return<void> ret = callback->notify(status);
+    if (!ret.isOk()) {
+        LOG(ERROR) << "Error in callback's return type: " << ret.description();
+    }
 }
 
 Return<ErrorStatus> PreparedModel::execute(const Request& request,
@@ -46,18 +50,25 @@
         LOG(ERROR) << "invalid callback passed to execute";
         return ErrorStatus::INVALID_ARGUMENT;
     }
+
     if (!nn::validateRequest(request, mNeuralNetworksModel)) {
-        callback->notify(ErrorStatus::INVALID_ARGUMENT);
+        Return<void> ret = callback->notify(ErrorStatus::INVALID_ARGUMENT);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "Error in callback's return type: " << ret.description();
+        }
         return ErrorStatus::INVALID_ARGUMENT;
     }
     if (!hexagon::isHexagonAvailable()) {
-        callback->notify(ErrorStatus::DEVICE_UNAVAILABLE);
+        Return<void> ret = callback->notify(ErrorStatus::DEVICE_UNAVAILABLE);
+        if (!ret.isOk()) {
+            LOG(ERROR) << "Error in callback's return type: " << ret.description();
+        }
         return ErrorStatus::DEVICE_UNAVAILABLE;
     }
 
-    // This thread is intentionally detached because the sample driver service
-    // is expected to live forever.
-    std::thread(asyncExecute, mHexagonModel, request, callback).detach();
+    // TODO: once the nnlib hanging issue is resolved, make this
+    // function asynchronous again.
+    asyncExecute(mHexagonModel, request, callback);
 
     return ErrorStatus::NONE;
 }
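
Note on the synchronous workaround above: every callback->notify() result is
now checked, and the detached execution thread has been removed until the
nnlib hang is fixed. Restoring the asynchronous path later presumably means
reinstating the deleted call; a minimal sketch, reusing the asyncExecute
defined above:

    // After the validateRequest and isHexagonAvailable checks succeed:
    // detached, because the service process is expected to outlive any
    // single execution request.
    std::thread(asyncExecute, mHexagonModel, request, callback).detach();
    return ErrorStatus::NONE;
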
diff --git a/1.0/PreparedModel.h b/1.0/PreparedModel.h
index e2e7d09..c2edd99 100644
--- a/1.0/PreparedModel.h
+++ b/1.0/PreparedModel.h
@@ -17,12 +17,12 @@
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_PREPAREDMODEL_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_PREPAREDMODEL_H
 
-#include "HexagonModel.h"
-#include "hexagon_nn_controller/hexagon_nn_controller.h"
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
 #include <hidl/MQDescriptor.h>
 #include <hidl/Status.h>
 #include <memory>
+#include "HexagonModel.h"
+#include "hexagon_nn_controller/hexagon_nn_controller.h"
 
 namespace android {
 namespace hardware {
@@ -30,23 +30,23 @@
 namespace V1_0 {
 namespace implementation {
 
+using ::android::sp;
 using ::android::hardware::hidl_array;
 using ::android::hardware::hidl_memory;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
-using ::android::sp;
 
 struct PreparedModel : public IPreparedModel {
-private:
-    PreparedModel()                                = delete;
-    PreparedModel(const PreparedModel&)            = delete;
-    PreparedModel(PreparedModel&&)                 = delete;
+   private:
+    PreparedModel() = delete;
+    PreparedModel(const PreparedModel&) = delete;
+    PreparedModel(PreparedModel&&) = delete;
     PreparedModel& operator=(const PreparedModel&) = delete;
-    PreparedModel& operator=(PreparedModel&&)      = delete;
+    PreparedModel& operator=(PreparedModel&&) = delete;
 
-public:
+   public:
     PreparedModel(const Model& neuralNetworksModel,
                   const std::shared_ptr<hexagon::Model>& hexagonModel);
     ~PreparedModel() override;
@@ -55,7 +55,7 @@
     Return<ErrorStatus> execute(const Request& request,
                                 const sp<IExecutionCallback>& callback) override;
 
-private:
+   private:
     Model mNeuralNetworksModel;
     std::shared_ptr<hexagon::Model> mHexagonModel;
 };
diff --git a/1.0/Service.cpp b/1.0/Service.cpp
index 1b52e67..f1f74e3 100644
--- a/1.0/Service.cpp
+++ b/1.0/Service.cpp
@@ -16,14 +16,14 @@
 
 #define LOG_TAG "android.hardware.neuralnetworks@1.0-service-hvx"
 
-#include "Device.h"
-#include <android/hardware/neuralnetworks/1.0/IDevice.h>
 #include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
 #include <hidl/HidlTransportSupport.h>
+#include "Device.h"
 
 // Generated HIDL files
-using android::hardware::neuralnetworks::V1_0::implementation::Device;
 using android::hardware::neuralnetworks::V1_0::IDevice;
+using android::hardware::neuralnetworks::V1_0::implementation::Device;
 
 int main() {
     android::sp<IDevice> device = new Device();
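
For context, a HIDL service entry point of this shape conventionally registers
the device instance and then joins the binder threadpool. A hedged sketch of
the rest of main() (the thread count and the "hvx" instance name are
assumptions, not taken from this diff):

    #include <android-base/logging.h>
    #include <hidl/HidlTransportSupport.h>

    using android::hardware::configureRpcThreadpool;
    using android::hardware::joinRpcThreadpool;

    int main() {
        android::sp<IDevice> device = new Device();
        configureRpcThreadpool(4, true /* callerWillJoin */);
        if (device->registerAsService("hvx") != android::OK) {
            LOG(ERROR) << "Could not register hvx service";
            return 1;
        }
        joinRpcThreadpool();
        return 0;  // joinRpcThreadpool() is not expected to return
    }
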
diff --git a/1.0/android.hardware.neuralnetworks@1.0-service-hvx.rc b/1.0/android.hardware.neuralnetworks@1.0-service-hvx.rc
index 909c482..538b869 100644
--- a/1.0/android.hardware.neuralnetworks@1.0-service-hvx.rc
+++ b/1.0/android.hardware.neuralnetworks@1.0-service-hvx.rc
@@ -1,3 +1,19 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 service neuralnetworks_hal_service_hvx /vendor/bin/hw/android.hardware.neuralnetworks@1.0-service-hvx
     class hal
     user system
diff --git a/1.0/hexagon_nn_controller/hexagon_nn_controller.h b/1.0/hexagon_nn_controller/hexagon_nn_controller.h
index b0c6aa5..cc1aada 100644
--- a/1.0/hexagon_nn_controller/hexagon_nn_controller.h
+++ b/1.0/hexagon_nn_controller/hexagon_nn_controller.h
@@ -24,7 +24,6 @@
 // includes
 #include "hexagon_nn_ops.h"
 
-
 // hexagon types
 
 typedef struct hexagon_nn_input {
@@ -60,97 +59,80 @@
 } hexagon_nn_padding_type;
 
 typedef struct hexagon_nn_tensordef {
-    unsigned int   batches;
-    unsigned int   height;
-    unsigned int   width;
-    unsigned int   depth;
+    unsigned int batches;
+    unsigned int height;
+    unsigned int width;
+    unsigned int depth;
     unsigned char* data;
-    int            dataLen;
-    unsigned int   data_valid_len;
-    unsigned int   unused;
+    int dataLen;
+    unsigned int data_valid_len;
+    unsigned int unused;
 } hexagon_nn_tensordef;
 
-
 // interface types
 
-typedef hexagon_nn_nn_id (*hexagon_nn_controller_init_fn)();
+typedef int (*hexagon_nn_controller_init_fn)(hexagon_nn_nn_id* g);
 
-typedef int (*hexagon_nn_controller_getlog_fn)(hexagon_nn_nn_id id,
-                                            unsigned char *buf,
-                                            unsigned int length);
+typedef int (*hexagon_nn_controller_getlog_fn)(hexagon_nn_nn_id id, unsigned char* buf,
+                                               unsigned int length);
 
-typedef int (*hexagon_nn_controller_snpprint_fn)(hexagon_nn_nn_id id,
-                                              unsigned char *buf,
-                                              unsigned int length);
+typedef int (*hexagon_nn_controller_snpprint_fn)(hexagon_nn_nn_id id, unsigned char* buf,
+                                                 unsigned int length);
 
 typedef int (*hexagon_nn_controller_set_debug_level_fn)(hexagon_nn_nn_id id, int level);
 
 typedef int (*hexagon_nn_controller_prepare_fn)(hexagon_nn_nn_id id);
 
-typedef int (*hexagon_nn_controller_append_node_fn)(hexagon_nn_nn_id id,
-                                                 unsigned int node_id,
-                                                 op_type operation,
-                                                 hexagon_nn_padding_type padding,
-                                                 const hexagon_nn_input *inputs,
-                                                 unsigned int num_inputs,
-                                                 const hexagon_nn_output *outputs,
-                                                 unsigned int num_outputs);
+typedef int (*hexagon_nn_controller_append_node_fn)(
+    hexagon_nn_nn_id id, unsigned int node_id, op_type operation, hexagon_nn_padding_type padding,
+    const hexagon_nn_input* inputs, unsigned int num_inputs, const hexagon_nn_output* outputs,
+    unsigned int num_outputs);
 
-typedef int (*hexagon_nn_controller_append_const_node_fn)(hexagon_nn_nn_id id,
-                                                       unsigned int node_id,
-                                                       unsigned int batches,
-                                                       unsigned int height,
-                                                       unsigned int width,
-                                                       unsigned int depth,
-                                                       const unsigned char *data,
-                                                       unsigned int data_len);
+typedef int (*hexagon_nn_controller_append_const_node_fn)(hexagon_nn_nn_id id, unsigned int node_id,
+                                                          unsigned int batches, unsigned int height,
+                                                          unsigned int width, unsigned int depth,
+                                                          const unsigned char* data,
+                                                          unsigned int data_len);
 
 typedef int (*hexagon_nn_controller_execute_new_fn)(hexagon_nn_nn_id id,
-                                                 const hexagon_nn_tensordef *inputs,
-                                                 unsigned int n_inputs,
-                                                 hexagon_nn_tensordef *outputs,
-                                                 unsigned int n_outputs);
+                                                    const hexagon_nn_tensordef* inputs,
+                                                    unsigned int n_inputs,
+                                                    hexagon_nn_tensordef* outputs,
+                                                    unsigned int n_outputs);
 
-typedef int (*hexagon_nn_controller_execute_fn)(hexagon_nn_nn_id id,
-                                             unsigned int batches_in,
-                                             unsigned int height_in,
-                                             unsigned int width_in,
-                                             unsigned int depth_in,
-                                             const unsigned char *data_in,
-                                             unsigned int data_len_in,
-                                             unsigned int *batches_out,
-                                             unsigned int *height_out,
-                                             unsigned int *width_out,
-                                             unsigned int *depth_out,
-                                             unsigned char *data_out,
-                                             unsigned int data_out_max,
-                                             unsigned int *data_out_size);
+typedef int (*hexagon_nn_controller_execute_fn)(hexagon_nn_nn_id id, unsigned int batches_in,
+                                                unsigned int height_in, unsigned int width_in,
+                                                unsigned int depth_in, const unsigned char* data_in,
+                                                unsigned int data_len_in, unsigned int* batches_out,
+                                                unsigned int* height_out, unsigned int* width_out,
+                                                unsigned int* depth_out, unsigned char* data_out,
+                                                unsigned int data_out_max,
+                                                unsigned int* data_out_size);
 
 typedef int (*hexagon_nn_controller_teardown_fn)(hexagon_nn_nn_id id);
 
 typedef int (*hexagon_nn_controller_get_perfinfo_fn)(hexagon_nn_nn_id id,
-                                                  hexagon_nn_perfinfo *info_out,
-                                                  unsigned int info_out_len,
-                                                  unsigned int *n_items_out);
+                                                     hexagon_nn_perfinfo* info_out,
+                                                     unsigned int info_out_len,
+                                                     unsigned int* n_items_out);
 
 typedef int (*hexagon_nn_controller_reset_perfinfo_fn)(hexagon_nn_nn_id id, unsigned int event);
 
-typedef int (*hexagon_nn_controller_version_fn)(int *ver);
+typedef int (*hexagon_nn_controller_version_fn)(int* ver);
 
 typedef int (*hexagon_nn_controller_last_execution_cycles_fn)(hexagon_nn_nn_id id,
-                                                           unsigned int *cycles_lo,
-                                                           unsigned int *cycles_hi);
+                                                              unsigned int* cycles_lo,
+                                                              unsigned int* cycles_hi);
 
-typedef int (*hexagon_nn_controller_GetHexagonBinaryVersion_fn)(int *ver);
+typedef int (*hexagon_nn_controller_GetHexagonBinaryVersion_fn)(int* ver);
 
-typedef int (*hexagon_nn_controller_PrintLog_fn)(const unsigned char *data_in,
+typedef int (*hexagon_nn_controller_PrintLog_fn)(const unsigned char* data_in,
                                                  unsigned int data_in_len);
 
-typedef int (*hexagon_nn_controller_op_name_to_id_fn)(const char *name, unsigned int *id);
+typedef int (*hexagon_nn_controller_op_name_to_id_fn)(const char* name, unsigned int* id);
 
-typedef int (*hexagon_nn_controller_op_id_to_name_fn)(const unsigned int id,
-                                                   char *name,
-                                                   int name_len);
+typedef int (*hexagon_nn_controller_op_id_to_name_fn)(const unsigned int id, char* name,
+                                                      int name_len);
 
 typedef int (*hexagon_nn_controller_disable_dcvs_fn)();
 
@@ -158,6 +140,11 @@
 
 typedef int (*hexagon_nn_controller_config_fn)();
 
+typedef unsigned int (*hexagon_nn_controller_get_dsp_offset_fn)();
+
+typedef int (*hexagon_nn_controller_boost_fn)(int bus_usage);
+
+typedef int (*hexagon_nn_controller_slow_fn)();
 
 #ifdef __cplusplus
 }  // extern "C"
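
The typedefs above describe function pointers rather than direct declarations,
which suggests the controller symbols are resolved from a vendor library at
runtime. A hedged sketch of such resolution (the library and symbol names here
are assumptions for illustration):

    #include <dlfcn.h>
    #include <android-base/logging.h>

    static hexagon_nn_controller_init_fn loadInitFn() {
        // Assumed library name; the real controller may live elsewhere.
        void* handle = dlopen("libhexagon_controller.so", RTLD_LAZY | RTLD_LOCAL);
        if (handle == nullptr) {
            LOG(ERROR) << "dlopen failed: " << dlerror();
            return nullptr;
        }
        // Assumed symbol name matching the typedef's naming scheme.
        return reinterpret_cast<hexagon_nn_controller_init_fn>(
            dlsym(handle, "hexagon_nn_controller_init"));
    }

    // Usage: the updated init signature returns an error code and writes
    // the graph id through an out-parameter:
    //     hexagon_nn_nn_id id;
    //     hexagon_nn_controller_init_fn init = loadInitFn();
    //     if (init != nullptr && init(&id) == 0) { /* graph id is valid */ }
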
diff --git a/1.0/hexagon_nn_controller/ops.def b/1.0/hexagon_nn_controller/ops.def
index f8b91ee..28e0b8f 100644
--- a/1.0/hexagon_nn_controller/ops.def
+++ b/1.0/hexagon_nn_controller/ops.def
@@ -1,3 +1,38 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted (subject to the limitations in the
+ * disclaimer below) provided that the following conditions are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *
+ *    * Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following
+ *      disclaimer in the documentation and/or other materials provided
+ *      with the distribution.
+ *
+ *    * Neither the name of The Linux Foundation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+ * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+ * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
 /* 
  * You probably want to 
  * 
@@ -198,13 +233,13 @@
 DEF_OP_WREF(QuantizedFC_8x8p8to8)
 #endif
 
-DEF_OP_WREF(QuantizedAdd_8x8to8_d32)
+DEF_OP_WREF(QuantizedAdd_8p8to8_d32)
 
 DEF_OP_WREF(QuantizedClamp_8)
 DEF_OP(Clamp_f)
 DEF_OP(QuantizeForTest_d32)
 DEF_OP(Close_d32)
-DEF_OP_WREF(QuantizedSub_8x8to8_d32)
+DEF_OP_WREF(QuantizedSub_8p8to8_d32)
 
 DEF_OP(InputSupernode_8x8p8to8_outd32)
 DEF_OP(QuantizedLRN_8_d32)
@@ -217,6 +252,26 @@
 DEF_OP(DepthwiseSupernode_8x8p32to8_d32)
 DEF_OP(InputSupernode_8x8p32to8_outd32)
 
+DEF_OP(PPrint_8_d32)
+DEF_OP(PPrintWithPadding_8_d32)
+DEF_OP_WREF(AutoQuantize_d32)
+
+DEF_OP_WREF(QuantizedTanh_8_d32)
+DEF_OP_WREF(QuantizedSigmoid_8_d32)
+DEF_OP_WREF(QuantizedSoftmax_8_d32)
+
+
+DEF_OP_WREF(QuantizedL2Pool_8_d32)
+
+DEF_OP(Gather_f)
+DEF_OP(Gather_int32)
+DEF_OP(Gather_8)
+DEF_OP(Table_f)
+DEF_OP(Table_int32)
+DEF_OP(Table_8)
+
+DEF_OP(FillPadding_8_d32)
+DEF_OP(QuantizedResizeBilinear_8_d32)
 #ifdef __SELF_DEF_OP_WREF
 #undef __SELF_DEF_OP_WREF
 #undef DEF_OP_WREF
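
ops.def is an X-macro list: each DEF_OP or DEF_OP_WREF entry expands to
whatever the including file defines those macros to be, and the
__SELF_DEF_OP_WREF block above cleans up a fallback DEF_OP_WREF that ops.def
supplies when the includer defines only DEF_OP. A minimal sketch of a
hypothetical consumer that turns the list into an enum (names here are
illustrative; the real mapping lives in hexagon_nn_ops.h):

    // Hypothetical consumer: one enumerator per DEF_OP entry.
    #define DEF_OP(NAME) OP_EXAMPLE_##NAME,
    typedef enum {
    #include "ops.def" /* DEF_OP_WREF entries fall back to DEF_OP */
        OP_EXAMPLE_MAX
    } example_op_type;
    #undef DEF_OP
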
diff --git a/Android.bp b/Android.bp
index ad8c9c8..eba2a93 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1 +1,17 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 subdirs=["neuralnetworks/hvxservice/1.0"]
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000..14b5e8b
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[Options]
+ignore_merged_commits = true
+
+[Builtin Hooks]
+clang_format = true